@@ -15,6 +15,7 @@ const KiB = 0x400;
const log = std.log.scoped(.nds9_bus);

main: *[4 * MiB]u8,
wram: *[32 * KiB]u8,
vram1: *[512 * KiB]u8, // TODO: Rename
io: io.Io,
ppu: Ppu,
@@ -31,6 +32,7 @@ pub fn init(allocator: Allocator, scheduler: *Scheduler, shared_ctx: SharedContext

    return .{
        .main = shared_ctx.main,
        .wram = shared_ctx.wram,
        .vram1 = vram1_mem,
        .ppu = try Ppu.init(allocator),
        .scheduler = scheduler,
@@ -71,6 +73,7 @@ fn _read(self: *@This(), comptime T: type, comptime mode: Mode, address: u32) T

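    // NOTE: each backing buffer is smaller than its 16 MiB slot, so the offset is
    // masked down (4 MiB of main RAM, 512 KiB of VRAM), mirroring each region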
    return switch (aligned_addr) {
        0x0200_0000...0x02FF_FFFF => readInt(T, self.main[aligned_addr & 0x003F_FFFF ..][0..byte_count]),
        // TODO: Impl Shared WRAM
        0x0400_0000...0x04FF_FFFF => io.read(self, T, aligned_addr),
        0x0600_0000...0x06FF_FFFF => readInt(T, self.vram1[aligned_addr & 0x0007_FFFF ..][0..byte_count]),
        else => warn("unexpected read: 0x{x:0>8} -> {}", .{ aligned_addr, T }),
@@ -109,3 +112,70 @@ fn warn(comptime format: []const u8, args: anytype) u0 {
    log.warn(format, args);
    return 0;
}

// Before I implement bus-wide fastmem, let's play with some more limited
// (read: less useful) fastmem implementations.
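//
// `Wram` maps a 16 MiB address space through two page tables, one for reads and
// one for writes: each 1 KiB page holds either a pointer into the backing buffer
// (the fast path: a single table lookup plus an offset, with no bus dispatch) or
// null, which falls through to a logged slow path.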
const Wram = struct {
    const page_size = 1 * KiB; // perhaps too big?
    const addr_space_size = 0x0100_0000;
    const table_len = addr_space_size / page_size;
    const IntFittingRange = std.math.IntFittingRange;

    _ptr: *[32 * KiB]u8,

    read_table: *const [table_len]?*const anyopaque,
    write_table: *const [table_len]?*anyopaque,

    pub fn init(allocator: Allocator, ptr: *[32 * KiB]u8) !@This() {
        // a single allocation backs both tables; it is split down the middle below
        const tables = try allocator.alloc(?*anyopaque, 2 * table_len);
        @memset(tables, null); // every page starts unmapped until update() runs

        return .{
            .read_table = tables[0..table_len],
            .write_table = tables[table_len .. 2 * table_len],
            ._ptr = ptr,
        };
    }

    pub fn update(_: *@This()) void {
        @panic("TODO: reload WRAM FASTMEM");
    }

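    // A sketch of the reload `update` will eventually perform, assuming the
    // simplest possible mapping in which the 32 KiB buffer just repeats across
    // the whole 16 MiB space (real hardware banks shared WRAM via WRAMCNT, which
    // this ignores):
    //
    //     const tables: [*]?*anyopaque = @ptrCast(@constCast(self.read_table));
    //     for (0..table_len) |page| {
    //         const ptr: *anyopaque = @ptrCast(self._ptr[(page * page_size) % self._ptr.len ..].ptr);
    //         tables[page] = ptr; // read table
    //         tables[table_len + page] = ptr; // write table (same backing memory)
    //     }
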
    pub fn read(self: @This(), comptime T: type, address: u32) T {
        const bits = @typeInfo(IntFittingRange(0, page_size - 1)).Int.bits;
        const page = address >> bits;
        const offset = address & (page_size - 1);

        std.debug.assert(page < table_len);

        if (self.read_table[page]) |some_ptr| {
            const ptr: [*]const T = @ptrCast(@alignCast(some_ptr));

            return ptr[forceAlign(T, offset) / @sizeOf(T)];
        }

        log.err("read(T: {}, addr: 0x{X:0>8}) was in un-mapped WRAM space", .{ T, address });
        return 0x00;
    }

    pub fn write(self: *@This(), comptime T: type, address: u32, value: T) void {
        const bits = @typeInfo(IntFittingRange(0, page_size - 1)).Int.bits;
        const page = address >> bits;
        const offset = address & (page_size - 1);

        std.debug.assert(page < table_len);

        if (self.write_table[page]) |some_ptr| {
            const ptr: [*]T = @ptrCast(@alignCast(some_ptr));
            ptr[forceAlign(T, offset) / @sizeOf(T)] = value;

            return;
        }

        log.warn("write(T: {}, addr: 0x{X:0>8}, value: 0x{X:0>8}) was in un-mapped WRAM space", .{ T, address, value });
    }

    pub fn deinit(self: @This(), allocator: Allocator) void {
        // recover the original mutable allocation so both tables are freed at once
        const og_ptr: [*]?*anyopaque = @ptrCast(@constCast(self.read_table));
        allocator.free(og_ptr[0 .. 2 * table_len]);
    }
};
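
// Usage sketch (hypothetical wiring, not part of this diff): given a `wram: Wram`
// field on this bus, the "TODO: Impl Shared WRAM" arm in `_read` above could
// become a fastmem lookup:
//
//     0x0300_0000...0x03FF_FFFF => self.wram.read(T, aligned_addr & (Wram.addr_space_size - 1)),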