Clean up Sparc32 memory management

Concentrate memory and MMU management (the lib.c malloc, the romvec obp_*
memory helpers, IOMMU page-table setup, OF /memory properties) in
arch/sparc32/lib.c.



git-svn-id: svn://coreboot.org/openbios/openbios-devel@344 f158a5a8-5612-0410-a976-696ce0be7e32
Author: Blue Swirl
Date: 2009-01-03 13:45:02 +00:00
parent ec53fcec45
commit 91148c0702
17 changed files with 571 additions and 522 deletions
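
For orientation, an editorial sketch (not part of the commit) of the sparc32 startup flow after this change, using only calls that appear in the diff below; hwdef and the CONFIG_DRIVER_SBUS guard come from arch/sparc32/openbios.c. The allocator pools and SRMMU setup are now owned by arch/sparc32/lib.c, while drivers/iommu.c keeps only the IOMMU itself:

/* Sketch of the post-commit init order; project headers assumed. */
int openbios(void)
{
#ifdef CONFIG_DRIVER_SBUS
    init_mmu_swift();                 /* lib.c: set up cmem/cio pools and SRMMU page tables */
#endif
    /* ... console setup etc.; arch_init() runs later during initialization ... */
    return 0;
}

static void arch_init(void)
{
    modules_init();
    ob_init_mmu();                    /* lib.c: /memory and /virtual-memory properties, romvec lists */
    ob_init_iommu(hwdef->iommu_base); /* iommu.c: program the IOMMU and publish the /iommu node */
    /* ... obio/sbus probing as before ... */
}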

View File

@@ -99,7 +99,7 @@ void boot(void)
obp_arg.argv[1] = param;
}
romvec = init_openprom(qemu_mem_size);
romvec = init_openprom();
if (kernel_size) {
int (*entry)(const void *romvec_ptr, int p2, int p3, int p4, int p5);

View File

@@ -24,7 +24,7 @@ extern struct context *__context;
unsigned int start_elf(unsigned long entry_point, unsigned long param);
// romvec.c
void *init_openprom(unsigned long memsize);
void *init_openprom(void);
// boot.c
extern struct sys_info sys_info;
@@ -39,3 +39,19 @@ extern struct linux_arguments_v0 obp_arg;
// openbios.c
extern int qemu_machine_type;
// arch/sparc32/lib.c
struct linux_mlist_v0;
extern struct linux_mlist_v0 *ptphys;
extern struct linux_mlist_v0 *ptmap;
extern struct linux_mlist_v0 *ptavail;
void ob_init_mmu(void);
void init_mmu_swift(void);
char *obp_dumb_mmap(char *va, int which_io, unsigned int pa,
unsigned int size);
void obp_dumb_munmap(__attribute__((unused)) char *va,
__attribute__((unused)) unsigned int size);
char *obp_dumb_memalloc(char *va, unsigned int size);
void obp_dumb_memfree(__attribute__((unused))char *va,
__attribute__((unused))unsigned sz);

View File

@@ -10,6 +10,7 @@
#include "openbios/drivers.h"
#include "openbios.h"
#include "video_subr.h"
#include "ofmem.h"
#ifdef CONFIG_DEBUG_CONSOLE

View File

@@ -7,12 +7,24 @@
* the copyright and warranty status of this work.
*/
#include "openbios/config.h"
#include "asm/types.h"
#include <stdarg.h>
#include "libc/stdlib.h"
#include "libc/vsprintf.h"
#include "openbios/kernel.h"
#include "openbios/bindings.h"
#include "ofmem.h"
#include "asm/asi.h"
#include "pgtsrmmu.h"
#include "openprom.h"
#include "sys_info.h"
#include "boot.h"
#define NCTX_SWIFT 0x100
#define LOWMEMSZ 32 * 1024 * 1024
#ifdef CONFIG_DEBUG_MEM
#define DPRINTF(fmt, args...) \
do { printk(fmt , ##args); } while (0)
#else
#define DPRINTF(fmt, args...)
#endif
/* Format a string and print it on the screen, just like the libc
* function printf.
@@ -32,6 +44,30 @@ int printk( const char *fmt, ... )
return i;
}
/*
* Allocatable memory chunk.
*/
struct mem {
char *start, *uplim;
char *curp;
};
static struct mem cmem; /* Current memory, virtual */
static struct mem cio; /* Current I/O space */
struct mem cdvmem; /* Current device virtual memory space */
unsigned int va_shift;
static unsigned long *context_table;
static unsigned long *l1;
static struct linux_mlist_v0 totphys[1];
static struct linux_mlist_v0 totmap[1];
static struct linux_mlist_v0 totavail[1];
struct linux_mlist_v0 *ptphys;
struct linux_mlist_v0 *ptmap;
struct linux_mlist_v0 *ptavail;
typedef struct alloc_desc {
struct alloc_desc *next;
int size; /* size (including) this struct */
@@ -63,16 +99,17 @@ static struct {
} ofmem;
#define ALLOC_BLOCK (64 * 1024)
void *malloc(int size)
// XXX should be posix_memalign
static int
posix_memalign2(void **memptr, size_t alignment, size_t size)
{
alloc_desc_t *d, **pp;
char *ret;
extern struct mem cmem;
if( !size )
return NULL;
return -1;
size = (size + 7) & ~7;
size = (size + (alignment - 1)) & ~(alignment - 1);
size += sizeof(alloc_desc_t);
/* look in the freelist */
@@ -84,7 +121,8 @@ void *malloc(int size)
ret = (char*)*pp + sizeof(alloc_desc_t);
memset( ret, 0, (**pp).size - sizeof(alloc_desc_t) );
*pp = (**pp).next;
return ret;
*memptr = ret;
return 0;
}
if( !ofmem.next_malloc || ofmem.left < size) {
@@ -101,13 +139,13 @@ void *malloc(int size)
sizeof(alloc_desc_t)));
}
ofmem.next_malloc = mem_alloc(&cmem, alloc_size, 4);
ofmem.next_malloc = mem_alloc(&cmem, alloc_size, 8);
ofmem.left = alloc_size;
}
if( ofmem.left < size) {
printk("out of malloc memory (%x)!\n", size );
return NULL;
return -1;
}
d = (alloc_desc_t*) ofmem.next_malloc;
ofmem.next_malloc += size;
@@ -118,7 +156,19 @@ void *malloc(int size)
ret = (char*)d + sizeof(alloc_desc_t);
memset( ret, 0, size - sizeof(alloc_desc_t) );
return ret;
*memptr = ret;
return 0;
}
void *malloc(int size)
{
int ret;
void *mem;
ret = posix_memalign2(&mem, 8, size);
if (ret != 0)
return NULL;
return mem;
}
void free(void *ptr)
@@ -156,3 +206,406 @@ realloc( void *ptr, size_t size )
free( ptr );
return p;
}
// XXX should be removed
int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
void *block;
block = mem_alloc(&cmem, size, alignment);
if (!block)
return -1;
*memptr = block;
return 0;
}
/*
* Allocate memory. This is reusable.
*/
void
mem_init(struct mem *t, char *begin, char *limit)
{
t->start = begin;
t->uplim = limit;
t->curp = begin;
}
void *
mem_alloc(struct mem *t, int size, int align)
{
char *p;
unsigned long pa;
// The alignment restrictions refer to physical, not virtual
// addresses
pa = va2pa((unsigned long)t->curp) + (align - 1);
pa &= ~(align - 1);
p = (char *)pa2va(pa);
if ((unsigned long)p >= (unsigned long)t->uplim ||
(unsigned long)p + size > (unsigned long)t->uplim)
return NULL;
t->curp = p + size;
return p;
}
static unsigned long
find_pte(unsigned long va, int alloc)
{
uint32_t pte;
void *p;
unsigned long pa;
int ret;
pte = l1[(va >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1)];
if ((pte & SRMMU_ET_MASK) == SRMMU_ET_INVALID) {
if (alloc) {
ret = posix_memalign(&p, SRMMU_PTRS_PER_PMD * sizeof(int),
SRMMU_PTRS_PER_PMD * sizeof(int));
if (ret != 0)
return ret;
pte = SRMMU_ET_PTD | ((va2pa((unsigned long)p)) >> 4);
l1[(va >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1)] = pte;
/* barrier() */
} else {
return -1;
}
}
pa = (pte & 0xFFFFFFF0) << 4;
pa += ((va >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1)) << 2;
pte = *(uint32_t *)pa2va(pa);
if ((pte & SRMMU_ET_MASK) == SRMMU_ET_INVALID) {
if (alloc) {
ret = posix_memalign(&p, SRMMU_PTRS_PER_PTE * sizeof(void *),
SRMMU_PTRS_PER_PTE * sizeof(void *));
if (ret != 0)
return ret;
pte = SRMMU_ET_PTD | ((va2pa((unsigned int)p)) >> 4);
*(uint32_t *)pa2va(pa) = pte;
} else {
return -2;
}
}
pa = (pte & 0xFFFFFFF0) << 4;
pa += ((va >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1)) << 2;
return pa2va(pa);
}
/*
* Create a memory mapping from va to epa.
*/
int
map_page(unsigned long va, uint64_t epa, int type)
{
uint32_t pte;
unsigned long pa;
pa = find_pte(va, 1);
pte = SRMMU_ET_PTE | ((epa & PAGE_MASK) >> 4);
if (type) { /* I/O */
pte |= SRMMU_REF;
/* SRMMU cannot make Supervisor-only, but not exectutable */
pte |= SRMMU_PRIV;
} else { /* memory */
pte |= SRMMU_REF | SRMMU_CACHE;
pte |= SRMMU_PRIV; /* Supervisor only access */
}
*(uint32_t *)pa = pte;
DPRINTF("map_page: va 0x%lx pa 0x%llx pte 0x%x\n", va, epa, pte);
return 0;
}
static void map_pages(unsigned long va, uint64_t pa, int type,
unsigned long size)
{
unsigned long npages, off;
DPRINTF("map_pages: va 0x%lx, pa 0x%llx, size 0x%lx\n", va, pa, size);
off = pa & (PAGE_SIZE - 1);
npages = (off + (size - 1) + (PAGE_SIZE - 1)) / PAGE_SIZE;
pa &= ~(uint64_t)(PAGE_SIZE - 1);
while (npages-- != 0) {
map_page(va, pa, type);
va += PAGE_SIZE;
pa += PAGE_SIZE;
}
}
/*
* Create an I/O mapping to pa[size].
* Returns va of the mapping or 0 if unsuccessful.
*/
void *
map_io(uint64_t pa, int size)
{
unsigned long va;
unsigned int npages;
unsigned int off;
off = pa & (PAGE_SIZE - 1);
npages = (off + size - 1) / PAGE_SIZE + 1;
pa &= ~(PAGE_SIZE - 1);
va = (unsigned long)mem_alloc(&cio, npages * PAGE_SIZE, PAGE_SIZE);
if (va == 0)
return NULL;
map_pages(va, pa, 1, npages * PAGE_SIZE);
return (void *)(va + off);
}
/*
* D5.3 pgmap@ ( va -- pte )
*/
static void
pgmap_fetch(void)
{
uint32_t pte;
unsigned long va, pa;
va = POP();
pa = find_pte(va, 0);
if (pa == 1 || pa == 2)
goto error;
pte = *(uint32_t *)pa;
DPRINTF("pgmap@: va 0x%lx pa 0x%lx pte 0x%x\n", va, pa, pte);
PUSH(pte);
return;
error:
PUSH(0);
}
/*
* D5.3 pgmap! ( pte va -- )
*/
static void
pgmap_store(void)
{
uint32_t pte;
unsigned long va, pa;
va = POP();
pte = POP();
pa = find_pte(va, 1);
*(uint32_t *)pa = pte;
DPRINTF("pgmap!: va 0x%lx pa 0x%lx pte 0x%x\n", va, pa, pte);
}
/*
* D5.3 map-pages ( pa space va size -- )
*/
static void
ob_map_pages(void)
{
unsigned long va;
int size;
uint64_t pa;
size = POP();
va = POP();
pa = POP();
pa <<= 32;
pa |= POP() & 0xffffffff;
map_pages(va, pa, 0, size);
DPRINTF("map-page: va 0x%lx pa 0x%llx size 0x%x\n", va, pa, size);
}
static void
init_romvec_mem(void)
{
ptphys = totphys;
ptmap = totmap;
ptavail = totavail;
/*
* Form memory descriptors.
*/
totphys[0].theres_more = NULL;
totphys[0].start_adr = (char *) 0;
totphys[0].num_bytes = qemu_mem_size;
totavail[0].theres_more = NULL;
totavail[0].start_adr = (char *) 0;
totavail[0].num_bytes = va2pa((int)&_start) - PAGE_SIZE;
totmap[0].theres_more = NULL;
totmap[0].start_adr = &_start;
totmap[0].num_bytes = (unsigned long) &_iomem -
(unsigned long) &_start + PAGE_SIZE;
}
char *obp_dumb_mmap(char *va, int which_io, unsigned int pa,
unsigned int size)
{
uint64_t mpa = ((uint64_t)which_io << 32) | (uint64_t)pa;
map_pages((unsigned long)va, mpa, 0, size);
return va;
}
void obp_dumb_munmap(__attribute__((unused)) char *va,
__attribute__((unused)) unsigned int size)
{
DPRINTF("obp_dumb_munmap: virta 0x%x, sz %d\n", (unsigned int)va, size);
}
char *obp_dumb_memalloc(char *va, unsigned int size)
{
static unsigned int next_free_address = 0xFFEDA000;
size = (size + 7) & ~7;
// XXX should use normal memory alloc
totmap[0].num_bytes -= size;
DPRINTF("obp_dumb_memalloc va 0x%p size %x at 0x%x\n", va, size,
totmap[0].num_bytes);
// If va is null, the allocator is supposed to pick a "suitable" address.
// (See OpenSolaric prom_alloc.c) There's not any real guidance as
// to what might be "suitable". So we mimic the behavior of a Sun boot
// ROM.
if (va == NULL) {
// XXX should register virtual memory allocation
va = (char *)(next_free_address - size);
next_free_address -= size;
DPRINTF("obp_dumb_memalloc req null -> 0x%p\n", va);
}
map_pages((unsigned long)va, totmap[0].num_bytes, 0, size);
return va;
}
void obp_dumb_memfree(__attribute__((unused))char *va,
__attribute__((unused))unsigned sz)
{
DPRINTF("obp_dumb_memfree 0x%p (size %d)\n", va, sz);
}
void
ob_init_mmu(void)
{
push_str("/memory");
fword("find-device");
PUSH(0);
fword("encode-int");
PUSH(0);
fword("encode-int");
fword("encode+");
PUSH(qemu_mem_size);
fword("encode-int");
fword("encode+");
push_str("reg");
fword("property");
PUSH(0);
fword("encode-int");
PUSH(0);
fword("encode-int");
fword("encode+");
PUSH(va2pa((unsigned long)&_start) - PAGE_SIZE);
fword("encode-int");
fword("encode+");
push_str("available");
fword("property");
push_str("/virtual-memory");
fword("find-device");
PUSH(0);
fword("encode-int");
PUSH(0);
fword("encode-int");
fword("encode+");
PUSH((unsigned long)&_start - PAGE_SIZE);
fword("encode-int");
fword("encode+");
PUSH(0);
fword("encode-int");
fword("encode+");
PUSH(va2pa((unsigned long)&_iomem));
fword("encode-int");
fword("encode+");
PUSH(-va2pa((unsigned long)&_iomem));
fword("encode-int");
fword("encode+");
push_str("available");
fword("property");
PUSH(0);
fword("active-package!");
bind_func("pgmap@", pgmap_fetch);
bind_func("pgmap!", pgmap_store);
bind_func("map-pages", ob_map_pages);
init_romvec_mem();
}
/*
* Switch page tables.
*/
void
init_mmu_swift(void)
{
unsigned int addr, i;
unsigned long pa, va;
mem_init(&cmem, (char *) &_vmem, (char *)&_evmem);
mem_init(&cio, (char *)&_end, (char *)&_iomem);
posix_memalign((void *)&context_table, NCTX_SWIFT * sizeof(int),
NCTX_SWIFT * sizeof(int));
posix_memalign((void *)&l1, 256 * sizeof(int), 256 * sizeof(int));
context_table[0] = (((unsigned long)va2pa((unsigned long)l1)) >> 4) |
SRMMU_ET_PTD;
for (i = 1; i < NCTX_SWIFT; i++) {
context_table[i] = SRMMU_ET_INVALID;
}
for (i = 0; i < 256; i += 4) {
l1[i] = SRMMU_ET_INVALID;
}
// text, rodata, data, and bss mapped to end of RAM
va = (unsigned long)&_start;
for (; va < (unsigned long)&_end; va += PAGE_SIZE) {
pa = va2pa(va);
map_page(va, pa, 0);
}
// 1:1 mapping for RAM
pa = va = 0;
for (; va < LOWMEMSZ; va += PAGE_SIZE, pa += PAGE_SIZE) {
map_page(va, pa, 0);
}
/*
* Flush cache
*/
for (addr = 0; addr < 0x2000; addr += 0x10) {
__asm__ __volatile__ ("sta %%g0, [%0] %1\n\t" : :
"r" (addr), "i" (ASI_M_DATAC_TAG));
__asm__ __volatile__ ("sta %%g0, [%0] %1\n\t" : :
"r" (addr<<1), "i" (ASI_M_TXTC_TAG));
}
srmmu_set_context(0);
srmmu_set_ctable_ptr(va2pa((unsigned long)context_table));
srmmu_flush_whole_tlb();
}
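
The file above now implements malloc() on top of posix_memalign2() and exposes a pool-backed posix_memalign() (its declaration is added to libc/stdlib.h in this commit). A hypothetical caller, shown only to illustrate the contract (zero return on success, memory drawn from the cmem pool, no zeroing on this path):

/* Hypothetical usage sketch of the consolidated lib.c allocator. */
#include "libc/stdlib.h"            /* malloc(), posix_memalign() per this commit */

static unsigned long *alloc_table(void)
{
    void *p;

    /* a 256-entry table aligned to its own size, as init_mmu_swift() does */
    if (posix_memalign(&p, 256 * sizeof(int), 256 * sizeof(int)) != 0)
        return NULL;                /* non-zero means the cmem pool is exhausted */
    /* note: unlike malloc(), posix_memalign() does not clear the block */
    return p;
}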

View File

@@ -111,10 +111,9 @@ static void init_memory(void)
static void
arch_init( void )
{
void setup_timers(void);
modules_init();
ob_init_mmu(hwdef->iommu_base);
ob_init_mmu();
ob_init_iommu(hwdef->iommu_base);
#ifdef CONFIG_DRIVER_OBIO
ob_obio_init(hwdef->slavio_base, hwdef->fd_offset,
hwdef->counter_offset, hwdef->intr_offset,
@@ -146,9 +145,8 @@ int openbios(void)
if (!hwdef)
for(;;); // Internal inconsistency, hang
mem_init(&cmem, (char *) &_vmem, (char *)&_evmem);
#ifdef CONFIG_DRIVER_SBUS
init_mmu_swift(hwdef->iommu_base);
init_mmu_swift();
#endif
#ifdef CONFIG_DEBUG_CONSOLE
#ifdef CONFIG_DEBUG_CONSOLE_SERIAL

View File

@@ -8,14 +8,9 @@
*/
#include "openprom.h"
#include "asm/io.h"
#include "asm/types.h"
#include "libc/vsprintf.h"
#include "openbios/config.h"
#include "openbios/bindings.h"
#include "openbios/drivers.h"
#include "openbios/kernel.h"
#include "openbios/sysinclude.h"
#include "sys_info.h"
#include "boot.h"
@@ -26,15 +21,6 @@
#define DPRINTF(fmt, args...)
#endif
#define PAGE_SIZE 4096
static struct linux_mlist_v0 totphys[1];
static struct linux_mlist_v0 totmap[1];
static struct linux_mlist_v0 totavail[1];
static struct linux_mlist_v0 *ptphys;
static struct linux_mlist_v0 *ptmap;
static struct linux_mlist_v0 *ptavail;
char obp_stdin, obp_stdout;
static int obp_fd_stdin, obp_fd_stdout;
const char *obp_stdin_path, *obp_stdout_path;
@@ -321,37 +307,6 @@ static int obp_rdblkdev(int dev_desc, int num_blks, int offset, char *buf)
return ret;
}
static char *obp_dumb_mmap(char *va, int which_io, unsigned int pa,
unsigned int size)
{
unsigned int npages;
unsigned int off;
unsigned int mva;
uint64_t mpa = ((uint64_t)which_io << 32) | (uint64_t)pa;
DPRINTF("obp_dumb_mmap: virta 0x%x, paddr 0x%llx, sz %d\n",
(unsigned int)va, mpa, size);
off = pa & (PAGE_SIZE-1);
npages = (off + (size - 1) + (PAGE_SIZE-1)) / PAGE_SIZE;
mpa &= ~(uint64_t)(PAGE_SIZE - 1);
mva = (unsigned int) va;
while (npages-- != 0) {
map_page(mva, mpa, 1);
mva += PAGE_SIZE;
mpa += (uint64_t)PAGE_SIZE;
}
return va;
}
static void obp_dumb_munmap(__attribute__((unused)) char *va,
__attribute__((unused)) unsigned int size)
{
DPRINTF("obp_dumb_munmap: virta 0x%x, sz %d\n", (unsigned int)va, size);
}
static int obp_devread(int dev_desc, char *buf, int nbytes)
{
int ret;
@@ -413,37 +368,6 @@ static int obp_inst2pkg(int dev_desc)
return ret;
}
static void obp_dumb_memfree(__attribute__((unused))char *va,
__attribute__((unused))unsigned sz)
{
DPRINTF("obp_dumb_memfree 0x%x(%d)\n", va, sz);
}
static char * obp_dumb_memalloc(char *va, unsigned int size)
{
static unsigned int next_free_address = 0xFFEDA000;
size = (size + 7) & ~7;
totmap[0].num_bytes -= size;
DPRINTF("obp_dumb_memalloc req 0x%x of %d at 0x%x\n", va, size,
totmap[0].num_bytes);
// If va is null, the allocator is supposed to pick a "suitable" address.
// (See OpenSolaric prom_alloc.c) There's not any real guidance as
// to what might be "suitable". So we mimic the behavior of a Sun boot
// ROM.
if (va == NULL) {
va = (char *)(next_free_address - size);
next_free_address -= size;
DPRINTF("obp_dumb_memalloc req null -> 0x%x\n", va);
}
obp_dumb_mmap(va, 0, totmap[0].num_bytes, size);
return va;
}
static int obp_cpustart(__attribute__((unused))unsigned int whichcpu,
__attribute__((unused))int ctxtbl_ptr,
__attribute__((unused))int thiscontext,
@@ -501,27 +425,8 @@ static void obp_fortheval_v2(char *str)
}
void *
init_openprom(unsigned long memsize)
init_openprom(void)
{
ptphys = totphys;
ptmap = totmap;
ptavail = totavail;
/*
* Form memory descriptors.
*/
totphys[0].theres_more = NULL;
totphys[0].start_adr = (char *) 0;
totphys[0].num_bytes = memsize;
totavail[0].theres_more = NULL;
totavail[0].start_adr = (char *) 0;
totavail[0].num_bytes = va2pa((int)&_start) - PAGE_SIZE;
totmap[0].theres_more = NULL;
totmap[0].start_adr = &_start;
totmap[0].num_bytes = (unsigned long) &_iomem - (unsigned long) &_start + PAGE_SIZE;
// Linux wants a R/W romvec table
romvec0.pv_magic_cookie = LINUX_OPPROM_MAGIC;
romvec0.pv_romvers = 3;

View File

@@ -24,6 +24,7 @@
#include "scsi.h"
#include "asm/dma.h"
#include "esp.h"
#include "ofmem.h"
#define BUFSIZE 4096

View File

@@ -6,24 +6,9 @@
**/
#include "openbios/config.h"
#include "openbios/bindings.h"
#include "openbios/kernel.h"
#include "libc/byteorder.h"
#include "libc/vsprintf.h"
#include "libc/string.h"
#include "openbios/drivers.h"
#include "asm/asi.h"
#include "asm/crs.h"
#include "asm/io.h"
#include "pgtsrmmu.h"
#include "iommu.h"
#define IOMMU_REGS 0x300
#define NCTX_SWIFT 0x100
#define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(phys) (((((phys)>>4) & IOPTE_PAGE) | IOPERM) & ~IOPTE_WAZ)
#define LOWMEMSZ 32 * 1024 * 1024
#include "ofmem.h"
#ifdef CONFIG_DEBUG_IOMMU
#define DPRINTF(fmt, args...) \
@@ -32,22 +17,6 @@
#define DPRINTF(fmt, args...)
#endif
/*
* Allocatable memory chunk.
*/
struct mem {
char *start, *uplim;
char *curp;
};
struct mem cmem; /* Current memory, virtual */
static struct mem cio; /* Current I/O space */
unsigned int va_shift;
static unsigned long *context_table;
static unsigned long *l1;
/*
* IOMMU parameters
*/
@@ -55,14 +24,9 @@ struct iommu {
struct iommu_regs *regs;
unsigned int *page_table;
unsigned long plow; /* Base bus address */
unsigned long vasize; /* Size of VA region that we manage */
struct mem bmap;
};
static struct iommu ciommu;
static struct iommu_regs *regs;
static void iommu_init(struct iommu *t, uint64_t base);
static void
iommu_invalidate(struct iommu_regs *iregs)
@@ -70,352 +34,6 @@ iommu_invalidate(struct iommu_regs *iregs)
iregs->tlbflush = 0;
}
/*
* Allocate memory. This is reusable.
*/
void
mem_init(struct mem *t, char *begin, char *limit)
{
t->start = begin;
t->uplim = limit;
t->curp = begin;
}
void *
mem_alloc(struct mem *t, int size, int align)
{
char *p;
unsigned long pa;
// The alignment restrictions refer to physical, not virtual
// addresses
pa = va2pa((unsigned long)t->curp) + (align - 1);
pa &= ~(align - 1);
p = (char *)pa2va(pa);
if ((unsigned long)p >= (unsigned long)t->uplim ||
(unsigned long)p + size > (unsigned long)t->uplim)
return NULL;
t->curp = p + size;
return p;
}
void *
mem_zalloc(struct mem *t, int size, int align)
{
char *p;
if ((p = mem_alloc(t, size, align)) != NULL)
memset(p, 0, size);
return p;
}
static unsigned long
find_pte(unsigned long va, int alloc)
{
uint32_t pte;
void *p;
unsigned long pa;
pte = l1[(va >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1)];
if ((pte & SRMMU_ET_MASK) == SRMMU_ET_INVALID) {
if (alloc) {
p = mem_zalloc(&cmem, SRMMU_PTRS_PER_PMD * sizeof(int),
SRMMU_PTRS_PER_PMD * sizeof(int));
if (p == NULL)
return -1;
pte = SRMMU_ET_PTD | ((va2pa((unsigned long)p)) >> 4);
l1[(va >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1)] = pte;
/* barrier() */
} else {
return -1;
}
}
pa = (pte & 0xFFFFFFF0) << 4;
pa += ((va >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1)) << 2;
pte = *(uint32_t *)pa2va(pa);
if ((pte & SRMMU_ET_MASK) == SRMMU_ET_INVALID) {
if (alloc) {
p = mem_zalloc(&cmem, SRMMU_PTRS_PER_PTE * sizeof(void *),
SRMMU_PTRS_PER_PTE * sizeof(void *));
if (p == NULL)
return -2;
pte = SRMMU_ET_PTD | ((va2pa((unsigned int)p)) >> 4);
*(uint32_t *)pa2va(pa) = pte;
} else {
return -2;
}
}
pa = (pte & 0xFFFFFFF0) << 4;
pa += ((va >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1)) << 2;
return pa2va(pa);
}
/*
* Create a memory mapping from va to epa.
*/
int
map_page(unsigned long va, uint64_t epa, int type)
{
uint32_t pte;
unsigned long pa;
pa = find_pte(va, 1);
pte = SRMMU_ET_PTE | ((epa & PAGE_MASK) >> 4);
if (type) { /* I/O */
pte |= SRMMU_REF;
/* SRMMU cannot make Supervisor-only, but not exectutable */
pte |= SRMMU_PRIV;
} else { /* memory */
pte |= SRMMU_REF | SRMMU_CACHE;
pte |= SRMMU_PRIV; /* Supervisor only access */
}
*(uint32_t *)pa = pte;
DPRINTF("map_page: va 0x%lx pa 0x%llx pte 0x%x\n", va, epa, pte);
return 0;
}
/*
* Create an I/O mapping to pa[size].
* Returns va of the mapping or 0 if unsuccessful.
*/
void *
map_io(uint64_t pa, int size)
{
void *va;
unsigned int npages;
unsigned int off;
unsigned int mva;
off = pa & (PAGE_SIZE - 1);
npages = (off + size - 1) / PAGE_SIZE + 1;
pa &= ~(PAGE_SIZE - 1);
va = mem_alloc(&cio, npages * PAGE_SIZE, PAGE_SIZE);
if (va == NULL)
return va;
mva = (unsigned int) va;
DPRINTF("map_io: va 0x%p pa 0x%llx off 0x%x npages %d\n", va, pa, off, npages); /* P3 */
while (npages-- != 0) {
map_page(mva, pa, 1);
mva += PAGE_SIZE;
pa += PAGE_SIZE;
}
return (void *)((unsigned int)va + off);
}
/*
* D5.3 pgmap@ ( va -- pte )
*/
static void
pgmap_fetch(void)
{
uint32_t pte;
unsigned long va, pa;
va = POP();
pa = find_pte(va, 0);
if (pa == 1 || pa == 2)
goto error;
pte = *(uint32_t *)pa;
DPRINTF("pgmap@: va 0x%lx pa 0x%lx pte 0x%x\n", va, pa, pte);
PUSH(pte);
return;
error:
PUSH(0);
}
/*
* D5.3 pgmap! ( pte va -- )
*/
static void
pgmap_store(void)
{
uint32_t pte;
unsigned long va, pa;
va = POP();
pte = POP();
pa = find_pte(va, 1);
*(uint32_t *)pa = pte;
DPRINTF("pgmap!: va 0x%lx pa 0x%lx pte 0x%x\n", va, pa, pte);
}
/*
* D5.3 map-pages ( pa space va size -- )
*/
static void
map_pages(void)
{
unsigned long va;
int size;
uint64_t pa;
size = POP();
va = POP();
pa = POP();
pa <<= 32;
pa |= POP() & 0xffffffff;
for (; size > 0; size -= PAGE_SIZE, pa += PAGE_SIZE, va += PAGE_SIZE)
map_page(va, pa, 1);
DPRINTF("map-page: va 0x%lx pa 0x%lx size 0x%x\n", va, pa, size);
}
void
ob_init_mmu(uint64_t base)
{
extern unsigned int qemu_mem_size;
push_str("/memory");
fword("find-device");
PUSH(0);
fword("encode-int");
PUSH(0);
fword("encode-int");
fword("encode+");
PUSH(qemu_mem_size);
fword("encode-int");
fword("encode+");
push_str("reg");
fword("property");
PUSH(0);
fword("encode-int");
PUSH(0);
fword("encode-int");
fword("encode+");
PUSH(va2pa((unsigned long)&_start) - PAGE_SIZE);
fword("encode-int");
fword("encode+");
push_str("available");
fword("property");
push_str("/virtual-memory");
fword("find-device");
PUSH(base >> 32);
fword("encode-int");
PUSH(base & 0xffffffff);
fword("encode-int");
fword("encode+");
PUSH(IOMMU_REGS);
fword("encode-int");
fword("encode+");
push_str("reg");
fword("property");
PUSH(0);
fword("encode-int");
PUSH(0);
fword("encode-int");
fword("encode+");
PUSH((unsigned long)&_start - PAGE_SIZE);
fword("encode-int");
fword("encode+");
PUSH(0);
fword("encode-int");
fword("encode+");
PUSH(va2pa((unsigned long)&_iomem));
fword("encode-int");
fword("encode+");
PUSH(-va2pa((unsigned long)&_iomem));
fword("encode-int");
fword("encode+");
push_str("available");
fword("property");
push_str("/iommu");
fword("find-device");
PUSH((unsigned long)regs);
fword("encode-int");
push_str("address");
fword("property");
PUSH(base >> 32);
fword("encode-int");
PUSH(base & 0xffffffff);
fword("encode-int");
fword("encode+");
PUSH(IOMMU_REGS);
fword("encode-int");
fword("encode+");
push_str("reg");
fword("property");
PUSH(0);
fword("active-package!");
bind_func("pgmap@", pgmap_fetch);
bind_func("pgmap!", pgmap_store);
bind_func("map-pages", map_pages);
}
/*
* Switch page tables.
*/
void
init_mmu_swift(uint64_t base)
{
unsigned int addr, i;
unsigned long pa, va;
mem_init(&cio, (char *)&_end, (char *)&_iomem);
context_table = mem_zalloc(&cmem, NCTX_SWIFT * sizeof(int), NCTX_SWIFT * sizeof(int));
l1 = mem_zalloc(&cmem, 256 * sizeof(int), 256 * sizeof(int));
context_table[0] = (((unsigned long)va2pa((unsigned long)l1)) >> 4) | SRMMU_ET_PTD;
for (i = 1; i < NCTX_SWIFT; i++) {
context_table[i] = SRMMU_ET_INVALID;
}
for (i = 0; i < 256; i += 4) {
l1[i] = SRMMU_ET_INVALID;
}
// text, rodata, data, and bss mapped to end of RAM
va = (unsigned long)&_start;
for (; va < (unsigned long)&_end; va += PAGE_SIZE) {
pa = va2pa(va);
map_page(va, pa, 0);
}
// 1:1 mapping for RAM
pa = va = 0;
for (; va < LOWMEMSZ; va += PAGE_SIZE, pa += PAGE_SIZE) {
map_page(va, pa, 0);
}
/*
* Flush cache
*/
for (addr = 0; addr < 0x2000; addr += 0x10) {
__asm__ __volatile__ ("sta %%g0, [%0] %1\n\t" : :
"r" (addr), "i" (ASI_M_DATAC_TAG));
__asm__ __volatile__ ("sta %%g0, [%0] %1\n\t" : :
"r" (addr<<1), "i" (ASI_M_TXTC_TAG));
}
srmmu_set_context(0);
srmmu_set_ctable_ptr(va2pa((unsigned long)context_table));
srmmu_flush_whole_tlb();
iommu_init(&ciommu, base);
}
/*
* XXX This is a problematic interface. We alloc _memory_ which is uncached.
* So if we ever reuse allocations somebody is going to get uncached pages.
@@ -432,13 +50,14 @@ dvma_alloc(int size, unsigned int *pphys)
unsigned int i;
unsigned int *iopte;
struct iommu *t = &ciommu;
int ret;
npages = (size + (PAGE_SIZE-1)) / PAGE_SIZE;
va = mem_alloc(&cmem, npages * PAGE_SIZE, PAGE_SIZE);
if (va == NULL)
ret = posix_memalign(&va, npages * PAGE_SIZE, PAGE_SIZE);
if (ret != 0)
return NULL;
ba = (unsigned int)mem_alloc(&t->bmap, npages * PAGE_SIZE, PAGE_SIZE);
ba = (unsigned int)mem_alloc(&cdvmem, npages * PAGE_SIZE, PAGE_SIZE);
if (ba == 0)
return NULL;
@@ -475,13 +94,16 @@ dvma_alloc(int size, unsigned int *pphys)
* This looks like initialization of CPU MMU but
* the routine is higher in food chain.
*/
static void
static struct iommu_regs *
iommu_init(struct iommu *t, uint64_t base)
{
unsigned int *ptab;
int ptsize;
unsigned int impl, vers;
unsigned int tmp;
struct iommu_regs *regs;
int ret;
unsigned long vasize;
regs = map_io(base, IOMMU_REGS);
if (regs == NULL) {
@@ -497,15 +119,17 @@ iommu_init(struct iommu *t, uint64_t base)
tmp |= (IOMMU_RNGE_32MB | IOMMU_CTRL_ENAB);
t->plow = 0xfe000000; /* End - 32 MB */
t->vasize = 0x2000000; /* 32 MB */
/* Size of VA region that we manage */
vasize = 0x2000000; /* 32 MB */
regs->control = tmp;
iommu_invalidate(regs);
/* Allocate IOMMU page table */
/* Thremendous alignment causes great waste... */
ptsize = (t->vasize/PAGE_SIZE) * sizeof(int);
if ((ptab = mem_zalloc(&cmem, ptsize, ptsize)) == NULL) {
/* Tremendous alignment causes great waste... */
ptsize = (vasize / PAGE_SIZE) * sizeof(int);
ret = posix_memalign((void *)&ptab, ptsize, ptsize);
if (ret != 0) {
DPRINTF("Cannot allocate IOMMU table [0x%x]\n", ptsize);
for (;;) { }
}
@@ -520,5 +144,46 @@ iommu_init(struct iommu *t, uint64_t base)
DPRINTF("IOMMU: impl %d vers %d page table at 0x%p (pa 0x%x) of size %d bytes\n",
impl, vers, t->page_table, tmp, ptsize);
mem_init(&t->bmap, (char*)t->plow, (char *)0xfffff000);
mem_init(&cdvmem, (char*)t->plow, (char *)0xfffff000);
return regs;
}
void
ob_init_iommu(uint64_t base)
{
struct iommu_regs *regs;
regs = iommu_init(&ciommu, base);
push_str("/virtual-memory");
fword("find-device");
PUSH(base >> 32);
fword("encode-int");
PUSH(base & 0xffffffff);
fword("encode-int");
fword("encode+");
PUSH(IOMMU_REGS);
fword("encode-int");
fword("encode+");
push_str("reg");
fword("property");
push_str("/iommu");
fword("find-device");
PUSH((unsigned long)regs);
fword("encode-int");
push_str("address");
fword("property");
PUSH(base >> 32);
fword("encode-int");
PUSH(base & 0xffffffff);
fword("encode-int");
fword("encode+");
PUSH(IOMMU_REGS);
fword("encode-int");
fword("encode+");
push_str("reg");
fword("property");
}
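
With the page-table code moved out, drivers/iommu.c now exports ob_init_iommu() and the DVMA allocator. A hypothetical driver fragment follows, a sketch only; the exact header that declares dvma_alloc() is an assumption based on the declarations this commit touches:

/* Hypothetical driver sketch: map device registers and grab a DVMA buffer. */
#include "ofmem.h"                  /* sparc32 map_io() declaration added by this commit */
/* dvma_alloc() comes from the drivers header updated above (assumed include) */

static int example_setup(uint64_t regs_phys)
{
    volatile uint32_t *regs = map_io(regs_phys, 0x100);  /* 256-byte MMIO window */
    unsigned int buf_ba;            /* bus address filled in by dvma_alloc() */
    void *buf = dvma_alloc(4096, &buf_ba);               /* one IOMMU-mapped page */

    if (regs == NULL || buf == NULL)
        return -1;
    /* program buf_ba into the device; the CPU touches the buffer through buf */
    return 0;
}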

View File

@@ -95,3 +95,8 @@ struct iommu_regs {
#define IOPTE_WRITE 0x00000004 /* Writeable */
#define IOPTE_VALID 0x00000002 /* IOPTE is valid */
#define IOPTE_WAZ 0x00000001 /* Write as zeros */
#define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(phys) (((((phys)>>4) & IOPTE_PAGE) | IOPERM) & ~IOPTE_WAZ)
#define IOMMU_REGS 0x300

View File

@@ -18,6 +18,7 @@
#include "openbios/drivers.h"
#include "openbios/nvram.h"
#include "ofmem.h"
#include "obio.h"
#define cpu_to_be16(x) __cpu_to_be16(x)
#include "openbios/firmware_abi.h"

View File

@@ -36,7 +36,6 @@
#define SLAVIO_SIZE 0x01000000
#define SUN4M_NCPUS 16
#define PAGE_SIZE 4096
#define CFG_ADDR 0xd00000510ULL
#define CFG_SIZE 3

View File

@@ -15,8 +15,8 @@
#include "openbios/kernel.h"
#include "libc/byteorder.h"
#include "libc/vsprintf.h"
#include "openbios/drivers.h"
#include "ofmem.h"
#define SBUS_REGS 0x28
#define SBUS_SLOTS 16

View File

@@ -19,6 +19,7 @@
extern void *malloc( int size );
extern void free( void *ptr );
extern void *realloc( void *ptr, size_t size );
extern int posix_memalign(void **memptr, size_t alignment, size_t size);
/* should perhaps go somewhere else... */
extern void qsort( void *base, size_t nmemb, size_t size, int (*compar)(const void*, const void*));

View File

@@ -37,6 +37,20 @@ void dsi_exception( void );
void isi_exception( void );
void setup_mmu( ulong ramsize );
void ofmem_register( phandle_t ph );
#elif defined(CONFIG_SPARC32)
#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
/* arch/sparc32/lib.c */
struct mem;
extern struct mem cdvmem;
void mem_init(struct mem *t, char *begin, char *limit);
void *mem_alloc(struct mem *t, int size, int align);
int map_page(unsigned long va, uint64_t epa, int type);
void *map_io(uint64_t pa, int size);
#endif
#endif /* _H_OFMEM */

View File

@@ -54,7 +54,8 @@ void serial_cls(void);
int uart_init(uint64_t port, unsigned long speed);
/* drivers/iommu.c */
extern struct mem cmem;
void ob_init_iommu(uint64_t base);
void *dvma_alloc(int size, unsigned int *pphys);
/* drivers/sbus.c */
extern uint16_t graphic_depth;

View File

@@ -31,17 +31,6 @@ pa2va(unsigned long pa)
#define phys_to_virt(phys) ((void *) ((unsigned long) (phys)))
#define virt_to_phys(virt) ((unsigned long) (virt))
struct mem;
void mem_init(struct mem *t, char *begin, char *limit);
void *mem_alloc(struct mem *t, int size, int align);
void *mem_zalloc(struct mem *t, int size, int align);
int map_page(unsigned long va, uint64_t epa, int type);
void *map_io(uint64_t pa, int size);
void ob_init_mmu(uint64_t base);
void init_mmu_swift(uint64_t base);
void *dvma_alloc(int size, unsigned int *pphys);
#ifndef BOOTSTRAP
#ifndef _IO_BASE