mirror of https://github.com/rcore-os/rCore.git

Receiving from e1000 driver works

Jiajie Chen 2019-02-26 18:06:37 +08:00
parent 85ae4b1ba1
commit 7a3c66a923
4 changed files with 441 additions and 22 deletions


@@ -1,9 +1,11 @@
use x86_64::instructions::port::Port;
use crate::drivers::net::e1000;
use crate::logging::*;
use core::slice;
use x86_64::instructions::port::Port;
const VENDOR: u32 = 0x00;
const DEVICE: u32 = 0x02;
const COMMAND: u32 = 0x04;
const STATUS: u32 = 0x06;
const SUBCLASS: u32 = 0x0a;
const CLASS: u32 = 0x0b;
@@ -24,10 +26,14 @@ const PCI_BASE_ADDRESS_MEM_TYPE_64: u32 = 0x04;
const PCI_BASE_ADDRESS_MEM_PREFETCH: u32 = 0x08;
const PCI_BASE_ADDRESS_MEM_MASK: u32 = 0xfffffff0;
struct PciTag(u32);
#[derive(Copy, Clone)]
pub struct PciTag(u32);
impl PciTag {
pub fn new(bus: u32, dev: u32, func: u32) -> PciTag {
assert!(bus < 256);
assert!(dev < 32);
assert!(func < 8);
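// pack as bus[23:16] | device[15:11] | function[10:8], the same layout the legacy CONFIG_ADDRESS register uses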
PciTag(bus << 16 | dev << 11 | func << 8)
}
@@ -86,7 +92,8 @@ impl PciTag {
// biscuit/src/pci/pci.go Pci_bar_mem
// linux/drivers/pci/probe.c pci_read_bases
pub unsafe fn getBarMem(&self, bar_number: u32) -> Option<&'static mut [u8]> {
// returns (addr, len)
pub unsafe fn get_bar_mem(&self, bar_number: u32) -> Option<(usize, usize)> {
assert!(bar_number <= 4);
let bar = BAR0 + 4 * bar_number;
let mut base = self.read(bar, 4);
@@ -113,23 +120,54 @@ impl PciTag {
}
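// the lowest set bit of the size mask read back from the BAR gives the region length; keep length - 1 so that base + size is the last byte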
size = (size & !(size - 1)) - 1;
debug!("device memory address from {:#X} to {:#X}", base, base + size);
return Some(slice::from_raw_parts_mut(base as *mut u8, size as usize));
debug!(
"device memory address from {:#X} to {:#X}",
base,
base + size
);
return Some((base as usize, size as usize));
}
pub fn describe(&self) -> bool {
// returns a tuple of (vid, did, next)
pub fn probe(&self) -> Option<(u32, u32, bool)> {
unsafe {
let v = self.read(VENDOR, 2);
if v == 0xffff {
return false;
return None;
}
let d = self.read(DEVICE, 2);
let mf = self.read(HEADER, 1);
let cl = self.read(CLASS, 1);
let scl = self.read(SUBCLASS, 1);
info!("{}: {}: {}: {:#X} {:#X} ({} {})", self.bus(), self.dev(), self.func(), v, d, cl, scl);
self.getBarMem(0);
return mf & 0x80 != 0;
info!(
"{}: {}: {}: {:#X} {:#X} ({} {})",
self.bus(),
self.dev(),
self.func(),
v,
d,
cl,
scl
);
return Some((v, d, mf & 0x80 != 0));
}
}
pub unsafe fn enable(&self) {
let orig = self.read(COMMAND, 2);
// IO_ENABLE | MEM_ENABLE | MASTER_ENABLE
self.write(COMMAND, orig | 0xf);
}
}
pub fn init_driver(vid: u32, did: u32, tag: PciTag) {
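// Intel 8086:100e is the 82540EM emulated by QEMU's e1000; 8086:10d3 is the 82574L (e1000e)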
if vid == 0x8086 && (did == 0x100e || did == 0x10d3) {
if let Some((addr, len)) = unsafe { tag.get_bar_mem(0) } {
unsafe {
tag.enable();
}
e1000::e1000_init(addr, len);
}
}
}
@@ -138,13 +176,17 @@ pub fn init() {
for bus in 0..256 {
for dev in 0..32 {
let tag = PciTag::new(bus, dev, 0);
if tag.describe() {
for func in 1..8 {
let tag = PciTag::new(bus, dev, func);
tag.describe();
if let Some((vid, did, next)) = tag.probe() {
init_driver(vid, did, tag);
if next {
for func in 1..8 {
let tag = PciTag::new(bus, dev, func);
if let Some((vid, did, _)) = tag.probe() {
init_driver(vid, did, tag);
}
}
}
}
}
}
info!("Init pci");
}
}
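
The hunks above call PciTag::read and PciTag::write without showing them. For orientation only (not part of this commit), a minimal sketch of the legacy CONFIG_ADDRESS/CONFIG_DATA access such helpers typically wrap, assuming the x86_64 crate's Port type that this file already imports; the helper name is made up, and the bit layout is the same one PciTag::new packs.

use x86_64::instructions::port::Port;

const PCI_CONFIG_ADDRESS: u16 = 0xCF8;
const PCI_CONFIG_DATA: u16 = 0xCFC;

// `tag` is the value packed by PciTag::new (bus << 16 | dev << 11 | func << 8);
// `offset` is a register offset such as VENDOR, COMMAND or BAR0.
unsafe fn pci_config_read_dword(tag: u32, offset: u32) -> u32 {
    // bit 31 enables configuration access; bits 7:2 select a dword-aligned register
    let address = 0x8000_0000u32 | tag | (offset & 0xFC);
    let mut address_port: Port<u32> = Port::new(PCI_CONFIG_ADDRESS);
    let mut data_port: Port<u32> = Port::new(PCI_CONFIG_DATA);
    address_port.write(address);
    data_port.read()
}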


@@ -0,0 +1,381 @@
use alloc::alloc::{GlobalAlloc, Layout};
use alloc::format;
use alloc::prelude::*;
use alloc::sync::Arc;
use core::mem::size_of;
use core::slice;
use core::sync::atomic::{fence, Ordering};
use crate::arch::consts::{KERNEL_OFFSET, MEMORY_OFFSET};
use bitflags::*;
use device_tree::util::SliceRead;
use device_tree::Node;
use log::*;
use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE;
use smoltcp::phy::{self, DeviceCapabilities};
use smoltcp::time::Instant;
use smoltcp::wire::EthernetAddress;
use smoltcp::Result;
use volatile::{ReadOnly, Volatile};
use crate::arch::cpu;
use crate::memory::active_table;
use crate::sync::SpinNoIrqLock as Mutex;
use crate::HEAP_ALLOCATOR;
use super::super::{DeviceType, Driver, NetDriver, DRIVERS, NET_DRIVERS};
pub struct E1000 {
header: usize,
size: usize,
mac: EthernetAddress,
send_page: usize,
send_buffers: Vec<usize>,
recv_page: usize,
recv_buffers: Vec<usize>,
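// true until the transmit ring has wrapped once; before that no DD status bit has ever been set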
first_trans: bool
}
#[derive(Clone)]
pub struct E1000Driver(Arc<Mutex<E1000>>);
impl Driver for E1000Driver {
fn try_handle_interrupt(&mut self) -> bool {
let driver = self.0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header, driver.size);
return false;
}
fn device_type(&self) -> DeviceType {
DeviceType::Net
}
}
impl E1000 {
fn transmit_available(&self) -> bool {
// TODO: map it on all CPUs
let mut current_addr = self.header;
while current_addr < self.header + self.size {
active_table().map_if_not_exists(current_addr, current_addr);
current_addr = current_addr + PAGE_SIZE;
}
let e1000 = unsafe { slice::from_raw_parts_mut(self.header as *mut Volatile<u32>, self.size / 4) };
let send_queue_size = PAGE_SIZE / size_of::<E1000SendDesc>();
let mut send_queue =
unsafe { slice::from_raw_parts_mut(self.send_page as *mut E1000SendDesc, send_queue_size) };
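// TDT (register 0x3818) is the transmit tail; the slot after it is reusable once hardware has set its DD (descriptor done) bit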
let mut tdt = e1000[0x3818 / 4].read();
let index = (tdt as usize + 1) % send_queue_size;
let send_desc = &mut send_queue[index];
// TODO: fix it
return self.first_trans || (*send_desc).status & 1 != 0;
}
fn receive_available(&self) -> bool {
// TODO: map it on all CPUs
let mut current_addr = self.header;
while current_addr < self.header + self.size {
active_table().map_if_not_exists(current_addr, current_addr);
current_addr = current_addr + PAGE_SIZE;
}
let e1000 = unsafe { slice::from_raw_parts_mut(self.header as *mut Volatile<u32>, self.size / 4) };
let recv_queue_size = PAGE_SIZE / size_of::<E1000RecvDesc>();
let mut recv_queue =
unsafe { slice::from_raw_parts_mut(self.recv_page as *mut E1000RecvDesc, recv_queue_size) };
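// RDT (register 0x2818) is the receive tail; the slot after it holds a finished packet once hardware has set its DD bit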
let mut rdt = e1000[0x2818 / 4].read();
let index = (rdt as usize + 1) % recv_queue_size;
let recv_desc = &mut recv_queue[index];
return (*recv_desc).status & 1 != 0;
}
}
impl NetDriver for E1000Driver {
fn get_mac(&self) -> EthernetAddress {
self.0.lock().mac
}
fn get_ifname(&self) -> String {
format!("e1000")
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct E1000SendDesc {
addr: u64,
len: u16,
cso: u8,
cmd: u8,
status: u8,
css: u8,
special: u8,
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct E1000RecvDesc {
addr: u64,
len: u16,
chksum: u16,
status: u16,
error: u8,
special: u8,
}
pub struct E1000RxToken(E1000Driver);
pub struct E1000TxToken(E1000Driver);
impl<'a> phy::Device<'a> for E1000Driver {
type RxToken = E1000RxToken;
type TxToken = E1000TxToken;
fn receive(&'a mut self) -> Option<(Self::RxToken, Self::TxToken)> {
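// smoltcp asks for a TxToken together with the RxToken because handling a received frame may require an immediate reply (e.g. ARP)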
let driver = self.0.lock();
if driver.transmit_available() && driver.receive_available() {
// potential race: availability is checked here, but the tokens are consumed later, after the lock is released
Some((E1000RxToken(self.clone()), E1000TxToken(self.clone())))
} else {
None
}
}
fn transmit(&'a mut self) -> Option<Self::TxToken> {
let driver = self.0.lock();
if driver.transmit_available() {
Some(E1000TxToken(self.clone()))
} else {
None
}
}
fn capabilities(&self) -> DeviceCapabilities {
let mut caps = DeviceCapabilities::default();
caps.max_transmission_unit = 1536;
caps.max_burst_size = Some(1);
caps
}
}
impl phy::RxToken for E1000RxToken {
fn consume<R, F>(self, _timestamp: Instant, f: F) -> Result<R>
where
F: FnOnce(&[u8]) -> Result<R>,
{
let data = {
let mut driver = (self.0).0.lock();
let e1000 = unsafe { slice::from_raw_parts_mut(driver.header as *mut Volatile<u32>, driver.size / 4) };
let recv_queue_size = PAGE_SIZE / size_of::<E1000RecvDesc>();
let mut recv_queue =
unsafe { slice::from_raw_parts_mut(driver.recv_page as *mut E1000RecvDesc, recv_queue_size) };
let mut rdt = e1000[0x2818 / 4].read();
let index = (rdt as usize + 1) % recv_queue_size;
let recv_desc = &mut recv_queue[index];
assert!(recv_desc.status & 1 != 0);
let buffer = unsafe { slice::from_raw_parts(driver.recv_buffers[index] as *const u8, recv_desc.len as usize) };
println!("{:?}", recv_desc);
for i in 0..recv_desc.len {
print!("{:#X} ", buffer[i as usize]);
}
println!("");
recv_desc.status = recv_desc.status & !1;
rdt = (rdt + 1) % recv_queue_size as u32;
e1000[0x2818 / 4].write(rdt);
buffer
};
let result = f(&data);
result
}
}
impl phy::TxToken for E1000TxToken {
fn consume<R, F>(self, _timestamp: Instant, len: usize, f: F) -> Result<R>
where
F: FnOnce(&mut [u8]) -> Result<R>,
{
let mut buffer = [0u8; 2048];
let result = f(&mut buffer[..len]);
let mut driver = (self.0).0.lock();
let e1000 = unsafe { slice::from_raw_parts_mut(driver.header as *mut Volatile<u32>, driver.size / 4) };
let send_queue_size = PAGE_SIZE / size_of::<E1000SendDesc>();
let mut send_queue =
unsafe { slice::from_raw_parts_mut(driver.send_page as *mut E1000SendDesc, send_queue_size) };
let mut tdt = e1000[0x3818 / 4].read();
let index_next = (tdt as usize + 1) % send_queue_size;
let send_desc = &mut send_queue[index_next];
assert!(driver.first_trans || send_desc.status & 1 != 0);
let index = (tdt as usize) % send_queue_size;
let send_desc = &mut send_queue[index];
let target = unsafe { slice::from_raw_parts_mut(driver.send_buffers[index] as *mut u8, len) };
target.copy_from_slice(&buffer[..len]);
println!("len {:?}", len);
let buffer_page_pa = active_table().get_entry(driver.send_buffers[index]).unwrap().target();
assert_eq!(buffer_page_pa, send_desc.addr as usize);
send_desc.len = len as u16 + 4;
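// CMD bit 0: EOP (end of packet), bit 3: RS (report status, i.e. set DD when done)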
send_desc.cmd = (1 << 3) | (1 << 0);
send_desc.status = 0;
println!("{:?}", &send_queue[index]);
for i in 0..len {
print!("{:#X} ", target[i]);
}
println!("tdh {} tdt {}", e1000[0x3810 / 4].read(), e1000[0x3818 / 4].read());
fence(Ordering::SeqCst);
tdt = (tdt + 2) % send_queue_size as u32;
e1000[0x3818 / 4].write(tdt);
fence(Ordering::SeqCst);
// the tail wrapped around: every descriptor has now been used at least once, so DD bits are meaningful
if tdt == 0 {
driver.first_trans = false;
}
result
}
}
bitflags! {
struct E1000Status : u32 {
const FD = 1 << 0;
const LU = 1 << 1;
const TXOFF = 1 << 4;
const TBIMODE = 1 << 5;
const SPEED_100M = 1 << 6;
const SPEED_1000M = 1 << 7;
const ASDV_100M = 1 << 8;
const ASDV_1000M = 1 << 9;
const MTXCKOK = 1 << 10;
const PCI66 = 1 << 11;
const BUS64 = 1 << 12;
const PCIX_MODE = 1 << 13;
const GIO_MASTER_ENABLE = 1 << 19;
}
}
// JudgeDuck-OS/kern/e1000.c
pub fn e1000_init(header: usize, size: usize) {
info!("Probing e1000");
assert_eq!(size_of::<E1000SendDesc>(), 16);
assert_eq!(size_of::<E1000RecvDesc>(), 16);
let send_page = unsafe {
HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap())
} as usize;
let recv_page = unsafe {
HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap())
} as usize;
let send_page_pa = active_table().get_entry(send_page).unwrap().target();
let recv_page_pa = active_table().get_entry(recv_page).unwrap().target();
let send_queue_size = PAGE_SIZE / size_of::<E1000SendDesc>();
let recv_queue_size = PAGE_SIZE / size_of::<E1000RecvDesc>();
let mut send_queue =
unsafe { slice::from_raw_parts_mut(send_page as *mut E1000SendDesc, send_queue_size) };
let mut recv_queue =
unsafe { slice::from_raw_parts_mut(recv_page as *mut E1000RecvDesc, recv_queue_size) };
// randomly generated
let mac: [u8; 6] = [0x54, 0x51, 0x9F, 0x71, 0xC0, 0x3C];
let mut driver = E1000 {
header,
size,
mac: EthernetAddress::from_bytes(&mac),
send_page,
send_buffers: Vec::with_capacity(send_queue_size),
recv_page,
recv_buffers: Vec::with_capacity(recv_queue_size),
first_trans: true,
};
let mut current_addr = header;
while current_addr < header + size {
active_table().map_if_not_exists(current_addr, current_addr);
current_addr = current_addr + PAGE_SIZE;
}
let e1000 = unsafe { slice::from_raw_parts_mut(header as *mut Volatile<u32>, size / 4) };
debug!(
"status before setup: {:#?}",
E1000Status::from_bits_truncate(e1000[0x8 / 4].read())
);
e1000[0x3800 / 4].write(send_page_pa as u32); // TDBAL
e1000[0x3804 / 4].write((send_page_pa >> 32) as u32); // TDBAH
e1000[0x3808 / 4].write(PAGE_SIZE as u32); // TDLEN
e1000[0x3810 / 4].write(0); // TDH
e1000[0x3818 / 4].write(0); // TDT
for i in 0..send_queue_size {
let buffer_page = unsafe {
HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap())
} as usize;
let buffer_page_pa = active_table().get_entry(buffer_page).unwrap().target();
send_queue[i].addr = buffer_page_pa as u64;
driver.send_buffers.push(buffer_page);
}
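// TCTL: enable (EN) and pad short packets (PSP), collision threshold 0x10, collision distance 0x40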
e1000[0x400 / 4].write((1 << 1) | (1 << 3) | (0x10 << 4) | (0x40 << 12)); // TCTL
e1000[0x410 / 4].write(0xa | (0x8 << 10) | (0xc << 20)); // TIPG
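// program the station MAC: RAL holds bytes 0-3, RAH holds bytes 4-5 plus AV (address valid, bit 31)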
let mut RAL: u32 = 0;
let mut RAH: u32 = 0;
for i in 0..4 {
RAL = RAL | (mac[i] as u32) << (i * 8);
}
for i in 0..2 {
RAH = RAH | (mac[i + 4] as u32) << (i * 8);
}
e1000[0x5400 / 4].write(RAL); // RAL
e1000[0x5404 / 4].write(RAH | (1 << 31)); // RAH
// MTA
for i in (0x5200 / 4)..(0x5400 / 4) {
e1000[i].write(0);
}
e1000[0xd0 / 4].write(0); // IMS
e1000[0x2800 / 4].write(recv_page_pa as u32); // RDBAL
e1000[0x2804 / 4].write((recv_page_pa >> 32) as u32); // RDBAH
e1000[0x2808 / 4].write(PAGE_SIZE as u32); // RDLEN
e1000[0x2810 / 4].write(0); // RDH
e1000[0x2818 / 4].write((recv_queue_size - 1) as u32); // RDT
for i in 0..recv_queue_size {
let buffer_page = unsafe {
HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap())
} as usize;
let buffer_page_pa = active_table().get_entry(buffer_page).unwrap().target();
recv_queue[i].addr = buffer_page_pa as u64;
driver.recv_buffers.push(buffer_page);
}
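// RCTL: enable (EN), accept broadcast (BAM), 4 KiB buffers (BSIZE=3 with BSEX), strip CRC (SECRC)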
e1000[0x100 / 4].write((1 << 1) | (1 << 15) | (3 << 16) | (1 << 25) | (1 << 26)); // RCTL
debug!(
"status after setup: {:#?}",
E1000Status::from_bits_truncate(e1000[0x8 / 4].read())
);
let net_driver = E1000Driver(Arc::new(Mutex::new(driver)));
DRIVERS.lock().push(Box::new(net_driver.clone()));
NET_DRIVERS.lock().push(Box::new(net_driver));
}
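
For context on how smoltcp is meant to drive the phy::Device implementation above (again, not part of this commit; rCore's real interface wiring lives elsewhere): a hedged sketch assuming the smoltcp builder API of this era, with a made-up function name and a made-up static IP.

use alloc::collections::BTreeMap;
use alloc::vec;
use smoltcp::iface::{EthernetInterfaceBuilder, NeighborCache};
use smoltcp::socket::SocketSet;
use smoltcp::time::Instant;
use smoltcp::wire::{IpAddress, IpCidr};

// Each poll pulls Rx/Tx tokens from E1000Driver and runs the consume()
// closures defined above, which touch the descriptor rings.
pub fn sketch_poll_once(device: E1000Driver) {
    let mac = device.get_mac();
    let mut iface = EthernetInterfaceBuilder::new(device)
        .ethernet_addr(mac)
        .neighbor_cache(NeighborCache::new(BTreeMap::new()))
        .ip_addrs(vec![IpCidr::new(IpAddress::v4(10, 0, 0, 2), 24)])
        .finalize();
    let mut sockets = SocketSet::new(vec![]);
    let _ = iface.poll(&mut sockets, Instant::from_millis(0));
}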


@@ -1 +1,2 @@
pub mod virtio_net;
pub mod e1000;


@@ -43,11 +43,6 @@ const VIRTIO_QUEUE_TRANSMIT: usize = 1;
impl Driver for VirtIONetDriver {
fn try_handle_interrupt(&mut self) -> bool {
// for simplicity
if cpu::id() > 0 {
return false
}
let driver = self.0.lock();
// ensure header page is mapped