Compare commits
1 Commit
e69bbde0e8...77ce083a04
Author | SHA1 | Date
---|---|---
Rekai Nyangadzayi Musuka | 77ce083a04 |
@@ -92,14 +92,17 @@ pub fn runFrame(scheduler: *Scheduler, system: System) void {
 // FIXME: Perf win to allocating on the stack instead?
 pub const SharedContext = struct {
     const MiB = 0x100000;
+    const KiB = 0x400;

     io: *SharedIo,
     main: *[4 * MiB]u8,
+    wram: *[32 * KiB]u8,

     pub fn init(allocator: Allocator) !@This() {
         const ctx = .{
             .io = try allocator.create(SharedIo),
             .main = try allocator.create([4 * MiB]u8),
+            .wram = try allocator.create([32 * KiB]u8),
         };

         ctx.io.* = .{};
@@ -109,6 +112,7 @@ pub const SharedContext = struct {
     pub fn deinit(self: @This(), allocator: Allocator) void {
         allocator.destroy(self.io);
         allocator.destroy(self.main);
+        allocator.destroy(self.wram);
     }
 };

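For orientation, a minimal usage sketch (assumed, not taken from this diff): SharedContext now owns the shared 32 KiB WRAM buffer alongside the I/O block and main RAM, so a single init/deinit pair manages all three heap allocations and the buses in the following hunks only borrow the pointers they are handed.

// Sketch under the above assumption; `allocator` is whatever Allocator the emulator already uses.
const ctx = try SharedContext.init(allocator);
defer ctx.deinit(allocator);
// Bus init functions (see the hunks below) receive the shared context and copy out ctx.main / ctx.wram.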
@@ -15,6 +15,7 @@ const KiB = 0x400;
 const log = std.log.scoped(.nds9_bus);

 main: *[4 * MiB]u8,
+wram: *[32 * KiB]u8,
 vram1: *[512 * KiB]u8, // TODO: Rename
 io: io.Io,
 ppu: Ppu,
@@ -31,6 +32,7 @@ pub fn init(allocator: Allocator, scheduler: *Scheduler, shared_ctx: SharedConte

     return .{
         .main = shared_ctx.main,
+        .wram = shared_ctx.wram,
         .vram1 = vram1_mem,
         .ppu = try Ppu.init(allocator),
         .scheduler = scheduler,
@@ -71,6 +73,7 @@ fn _read(self: *@This(), comptime T: type, comptime mode: Mode, address: u32) T

     return switch (aligned_addr) {
         0x0200_0000...0x02FF_FFFF => readInt(T, self.main[aligned_addr & 0x003F_FFFF ..][0..byte_count]),
+        // TODO: Impl Shared WRAM
         0x0400_0000...0x04FF_FFFF => io.read(self, T, aligned_addr),
         0x0600_0000...0x06FF_FFFF => readInt(T, self.vram1[aligned_addr & 0x0007_FFFF ..][0..byte_count]),
         else => warn("unexpected read: 0x{x:0>8} -> {}", .{ aligned_addr, T }),
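The new `// TODO: Impl Shared WRAM` arm marks where shared WRAM would slot into the switch. Purely as a sketch (not in this commit; the 0x0300_0000 range and the 0x7FFF mirror mask are assumptions drawn from the neighbouring arms and the 32 KiB buffer size), the case might eventually read:

// Hypothetical — the commit intentionally leaves this case unimplemented.
0x0300_0000...0x03FF_FFFF => readInt(T, self.wram[aligned_addr & 0x0000_7FFF ..][0..byte_count]),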
@@ -109,3 +112,70 @@ fn warn(comptime format: []const u8, args: anytype) u0 {
     log.warn(format, args);
     return 0;
 }
+
+// Before I implement Bus-wide Fastmem, Let's play with some more limited (read: less useful)
+// fastmem implementations
+const Wram = struct {
+    const page_size = 1 * KiB; // perhaps too big?
+    const addr_space_size = 0x0100_0000;
+    const table_len = addr_space_size / page_size;
+    const IntFittingRange = std.math.IntFittingRange;
+
+    _ptr: *[32 * KiB]u8,
+
+    read_table: *const [table_len]?*const anyopaque,
+    write_table: *const [table_len]?*anyopaque,
+
+    pub fn init(allocator: Allocator, ptr: *[32 * KiB]u8) !@This() {
+        const tables = try allocator.alloc(?*anyopaque, 2 * table_len);
+
+        return .{
+            .read_table = tables[0..table_len],
+            .write_table = tables[table_len .. 2 * table_len],
+            ._ptr = ptr,
+        };
+    }
+
+    pub fn update(_: *@This()) void {
+        @panic("TODO: reload WRAM FASTMEM");
+    }
+
+    pub fn read(self: @This(), comptime T: type, address: u32) T {
+        const bits = @typeInfo(IntFittingRange(0, page_size - 1)).Int.bits;
+        const page = address >> bits;
+        const offset = address & (page_size - 1);
+
+        std.debug.assert(page < table_len);
+
+        if (self.read_table[page]) |some_ptr| {
+            const ptr: [*]const T = @ptrCast(@alignCast(some_ptr));
+
+            return ptr[forceAlign(T, offset) / @sizeOf(T)];
+        }
+
+        log.err("read(T: {}, addr: 0x{X:0>8}) was in un-mapped WRAM space", .{ T, address });
+        return 0x00;
+    }
+
+    pub fn write(self: *@This(), comptime T: type, address: u32, value: T) void {
+        const bits = @typeInfo(IntFittingRange(0, page_size - 1)).Int.bits;
+        const page = address >> bits;
+        const offset = address & (page_size - 1);
+
+        std.debug.assert(page < table_len);
+
+        if (self.write_table[page]) |some_ptr| {
+            const ptr: [*]T = @ptrCast(@alignCast(some_ptr));
+            ptr[forceAlign(T, offset) / @sizeOf(T)] = value;
+
+            return;
+        }
+
+        log.warn("write(T: {}, addr: 0x{X:0>8}, value: 0x{X:0>8}) was in un-mapped WRAM space", .{ T, address, value });
+    }
+
+    pub fn deinit(self: @This(), allocator: Allocator) void {
+        const og_ptr: [*]?*anyopaque = @ptrCast(self.read_table);
+        allocator.free(og_ptr[0 .. 2 * table_len]);
+    }
+};
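The new Wram type is a page-table ("fastmem"-style) lookup: the 16 MiB WRAM address space is split into 1 KiB pages, and read/write index a table of optional host pointers, returning 0x00 or dropping the write when a page is unmapped. The commit leaves update(), which would fill those tables, as a panic. Purely as an illustrative sketch, and assuming the 32 KiB buffer is simply mirrored across the whole region (the real layout is selected by WRAMCNT and is ignored here), the reload could look roughly like this:

// Illustrative only — NOT part of this commit, which panics with "TODO: reload WRAM FASTMEM".
// Assumption: the 32 KiB backing buffer is mirrored across the mapped region; bank
// switching via WRAMCNT is not modelled.
pub fn update(self: *@This()) void {
    const read_table = @constCast(self.read_table); // drop const so the tables can be (re)filled
    const write_table = @constCast(self.write_table);

    for (0..table_len) |page| {
        // Each 1 KiB page points at the matching offset inside the 32 KiB buffer,
        // wrapping around once the buffer is exhausted (i.e. a simple mirror).
        const offset = (page * page_size) % self._ptr.len;
        const page_ptr: *anyopaque = @ptrCast(self._ptr[offset..].ptr);

        read_table[page] = page_ptr;
        write_table[page] = page_ptr;
    }
}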