chore: move OAM, PALRAM and VRAM structs to separate files

Rekai Nyangadzayi Musuka 2022-09-18 07:17:54 -03:00
parent d66021dc9e
commit add7ea6f1d
5 changed files with 164 additions and 167 deletions

src/core/bus/dma.zig

@@ -227,7 +227,7 @@ fn DmaController(comptime id: u2) type {
}
}
pub fn pollBlankingDma(self: *Self, comptime kind: DmaKind) void {
pub fn pollBlanking(self: *Self, comptime kind: DmaKind) void {
if (self.in_progress) return; // If there's an ongoing DMA Transfer, exit early
// No ongoing DMA Transfer, We want to check if we should repeat an existing one
@@ -271,11 +271,11 @@ fn DmaController(comptime id: u2) type {
};
}
pub fn pollBlankingDma(bus: *Bus, comptime kind: DmaKind) void {
bus.dma[0].pollBlankingDma(kind);
bus.dma[1].pollBlankingDma(kind);
bus.dma[2].pollBlankingDma(kind);
bus.dma[3].pollBlankingDma(kind);
pub fn onBlanking(bus: *Bus, comptime kind: DmaKind) void {
bus.dma[0].pollBlanking(kind);
bus.dma[1].pollBlanking(kind);
bus.dma[2].pollBlanking(kind);
bus.dma[3].pollBlanking(kind);
}
const Adjustment = enum(u2) {

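For context (an illustrative excerpt, not an additional hunk): after this rename the PPU triggers blanking DMA through the bus-level helper, and the ppu.zig changes below update both call sites accordingly.

const dma = @import("bus/dma.zig");

// Entering HBlank -- only polled while the PPU is not in VBlank:
if (!self.dispstat.vblank.read())
    dma.onBlanking(cpu.bus, .HBlank);

// Entering VBlank:
dma.onBlanking(cpu.bus, .VBlank);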
src/core/ppu.zig

@@ -1,16 +1,18 @@
const std = @import("std");
const io = @import("bus/io.zig");
const Bit = @import("bitfield").Bit;
const Bitfield = @import("bitfield").Bitfield;
const dma = @import("bus/dma.zig");
const Oam = @import("ppu/Oam.zig");
const Palette = @import("ppu/Palette.zig");
const Vram = @import("ppu/Vram.zig");
const EventKind = @import("scheduler.zig").EventKind;
const Scheduler = @import("scheduler.zig").Scheduler;
const Arm7tdmi = @import("cpu.zig").Arm7tdmi;
const Bit = @import("bitfield").Bit;
const Bitfield = @import("bitfield").Bitfield;
const Allocator = std.mem.Allocator;
const log = std.log.scoped(.PPU);
const pollBlankingDma = @import("bus/dma.zig").pollBlankingDma;
const log = std.log.scoped(.Ppu);
/// This is used to generate byuu / Talurabi's Color Correction algorithm
const COLOUR_LUT = genColourLut();
@@ -488,7 +490,7 @@ pub const Ppu = struct {
while (i < width) : (i += 1) {
// If we're outside of the bounds of mode 5, draw the background colour
const bgr555 =
if (scanline < m5_height and i < m5_width) self.vram.read(u16, vram_base + i * @sizeOf(u16)) else self.palette.getBackdrop();
if (scanline < m5_height and i < m5_width) self.vram.read(u16, vram_base + i * @sizeOf(u16)) else self.palette.backdrop();
std.mem.writeIntNative(u32, self.framebuf.get(.Emulator)[fb_base + i * @sizeOf(u32) ..][0..@sizeOf(u32)], COLOUR_LUT[bgr555 & 0x7FFF]);
}
@@ -532,7 +534,7 @@ pub const Ppu = struct {
}
if (maybe_top) |top| return top;
return self.palette.getBackdrop();
return self.palette.backdrop();
}
fn copyToBackgroundBuffer(self: *Self, comptime n: u2, bounds: ?WindowBounds, i: usize, bgr555: u16) void {
@@ -661,7 +663,7 @@ pub const Ppu = struct {
// See if HBlank DMA is present and not enabled
if (!self.dispstat.vblank.read())
pollBlankingDma(cpu.bus, .HBlank);
dma.onBlanking(cpu.bus, .HBlank);
self.dispstat.hblank.set();
self.sched.push(.HBlank, 68 * 4 -| late);
@@ -703,7 +705,7 @@ pub const Ppu = struct {
self.aff_bg[1].latchRefPoints();
// See if Vblank DMA is present and not enabled
pollBlankingDma(cpu.bus, .VBlank);
dma.onBlanking(cpu.bus, .VBlank);
}
if (scanline == 227) self.dispstat.vblank.unset();
@@ -712,158 +714,6 @@ pub const Ppu = struct {
}
};
const Palette = struct {
const palram_size = 0x400;
const Self = @This();
buf: []u8,
allocator: Allocator,
fn init(allocator: Allocator) !Self {
const buf = try allocator.alloc(u8, palram_size);
std.mem.set(u8, buf, 0);
return Self{
.buf = buf,
.allocator = allocator,
};
}
fn deinit(self: *Self) void {
self.allocator.free(self.buf);
self.* = undefined;
}
pub fn read(self: *const Self, comptime T: type, address: usize) T {
const addr = address & 0x3FF;
return switch (T) {
u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
else => @compileError("PALRAM: Unsupported read width"),
};
}
pub fn write(self: *Self, comptime T: type, address: usize, value: T) void {
const addr = address & 0x3FF;
switch (T) {
u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)], value),
u8 => {
const align_addr = addr & ~@as(u32, 1); // Aligned to Halfword boundary
std.mem.writeIntSliceLittle(u16, self.buf[align_addr..][0..@sizeOf(u16)], @as(u16, value) * 0x101);
},
else => @compileError("PALRAM: Unsupported write width"),
}
}
fn getBackdrop(self: *const Self) u16 {
return self.read(u16, 0);
}
};
const Vram = struct {
const vram_size = 0x18000;
const Self = @This();
buf: []u8,
allocator: Allocator,
fn init(allocator: Allocator) !Self {
const buf = try allocator.alloc(u8, vram_size);
std.mem.set(u8, buf, 0);
return Self{
.buf = buf,
.allocator = allocator,
};
}
fn deinit(self: *Self) void {
self.allocator.free(self.buf);
self.* = undefined;
}
pub fn read(self: *const Self, comptime T: type, address: usize) T {
const addr = Self.mirror(address);
return switch (T) {
u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
else => @compileError("VRAM: Unsupported read width"),
};
}
pub fn write(self: *Self, comptime T: type, dispcnt: io.DisplayControl, address: usize, value: T) void {
const mode: u3 = dispcnt.bg_mode.read();
const idx = Self.mirror(address);
switch (T) {
u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[idx..][0..@sizeOf(T)], value),
u8 => {
// Ignore write if it falls within the boundaries of OBJ VRAM
switch (mode) {
0, 1, 2 => if (0x0001_0000 <= idx) return,
else => if (0x0001_4000 <= idx) return,
}
const align_idx = idx & ~@as(u32, 1); // Aligned to a halfword boundary
std.mem.writeIntSliceLittle(u16, self.buf[align_idx..][0..@sizeOf(u16)], @as(u16, value) * 0x101);
},
else => @compileError("VRAM: Unsupported write width"),
}
}
fn mirror(address: usize) usize {
// Mirrored in steps of 128K (64K + 32K + 32K) (abcc)
const addr = address & 0x1FFFF;
// If the address is within 96K we don't do anything,
// otherwise we want to mirror the last 32K (addresses between 64K and 96K)
return if (addr < vram_size) addr else 0x10000 + (addr & 0x7FFF);
}
};
const Oam = struct {
const oam_size = 0x400;
const Self = @This();
buf: []u8,
allocator: Allocator,
fn init(allocator: Allocator) !Self {
const buf = try allocator.alloc(u8, oam_size);
std.mem.set(u8, buf, 0);
return Self{
.buf = buf,
.allocator = allocator,
};
}
fn deinit(self: *Self) void {
self.allocator.free(self.buf);
self.* = undefined;
}
pub fn read(self: *const Self, comptime T: type, address: usize) T {
const addr = address & 0x3FF;
return switch (T) {
u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
else => @compileError("OAM: Unsupported read width"),
};
}
pub fn write(self: *Self, comptime T: type, address: usize, value: T) void {
const addr = address & 0x3FF;
switch (T) {
u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)], value),
u8 => return, // 8-bit writes are explicitly ignored
else => @compileError("OAM: Unsupported write width"),
}
}
};
const Window = struct {
const Self = @This();

src/core/ppu/Oam.zig (new file, 40 lines)

@@ -0,0 +1,40 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const buf_len = 0x400;
const Self = @This();
buf: []u8,
allocator: Allocator,
pub fn read(self: *const Self, comptime T: type, address: usize) T {
const addr = address & 0x3FF;
return switch (T) {
u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
else => @compileError("OAM: Unsupported read width"),
};
}
pub fn write(self: *Self, comptime T: type, address: usize, value: T) void {
const addr = address & 0x3FF;
switch (T) {
u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)], value),
u8 => return, // 8-bit writes are explicitly ignored
else => @compileError("OAM: Unsupported write width"),
}
}
pub fn init(allocator: Allocator) !Self {
const buf = try allocator.alloc(u8, buf_len);
std.mem.set(u8, buf, 0);
return Self{ .buf = buf, .allocator = allocator };
}
pub fn deinit(self: *Self) void {
self.allocator.free(self.buf);
self.* = undefined;
}
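Purely as an illustration (not part of this commit), the extracted struct keeps the GBA's OAM write behaviour: 16/32-bit writes land, 8-bit writes are dropped. A minimal sketch, assuming a test file placed next to Oam.zig and the standard testing allocator:

const std = @import("std");
const Oam = @import("Oam.zig");

test "8-bit OAM writes are ignored" {
    var oam = try Oam.init(std.testing.allocator);
    defer oam.deinit();

    oam.write(u16, 0x0000, 0xBEEF); // halfword write is stored
    oam.write(u8, 0x0000, 0xAA); // byte write is a no-op
    try std.testing.expectEqual(@as(u16, 0xBEEF), oam.read(u16, 0x0000));
}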

src/core/ppu/Palette.zig (new file, 47 lines)

@@ -0,0 +1,47 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const buf_len = 0x400;
const Self = @This();
buf: []u8,
allocator: Allocator,
pub fn read(self: *const Self, comptime T: type, address: usize) T {
const addr = address & 0x3FF;
return switch (T) {
u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
else => @compileError("PALRAM: Unsupported read width"),
};
}
pub fn write(self: *Self, comptime T: type, address: usize, value: T) void {
const addr = address & 0x3FF;
switch (T) {
u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)], value),
u8 => {
const align_addr = addr & ~@as(u32, 1); // Aligned to Halfword boundary
std.mem.writeIntSliceLittle(u16, self.buf[align_addr..][0..@sizeOf(u16)], @as(u16, value) * 0x101);
},
else => @compileError("PALRAM: Unsupported write width"),
}
}
pub fn init(allocator: Allocator) !Self {
const buf = try allocator.alloc(u8, buf_len);
std.mem.set(u8, buf, 0);
return Self{ .buf = buf, .allocator = allocator };
}
pub fn deinit(self: *Self) void {
self.allocator.free(self.buf);
self.* = undefined;
}
pub fn backdrop(self: *const Self) u16 {
return self.read(u16, 0);
}
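Again illustrative only: 8-bit PALRAM writes are duplicated across the aligned halfword (value * 0x101), and backdrop() is simply a read of colour 0. A minimal sketch under the same assumptions as above:

const std = @import("std");
const Palette = @import("Palette.zig");

test "byte writes to PALRAM are duplicated across the aligned halfword" {
    var palette = try Palette.init(std.testing.allocator);
    defer palette.deinit();

    palette.write(u8, 0x0001, 0x1F); // odd address aligns down to 0x0000
    try std.testing.expectEqual(@as(u16, 0x1F1F), palette.read(u16, 0x0000));
    try std.testing.expectEqual(@as(u16, 0x1F1F), palette.backdrop());
}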

src/core/ppu/Vram.zig (new file, 60 lines)

@@ -0,0 +1,60 @@
const std = @import("std");
const io = @import("../bus/io.zig");
const Allocator = std.mem.Allocator;
const buf_len = 0x18000;
const Self = @This();
buf: []u8,
allocator: Allocator,
pub fn read(self: *const Self, comptime T: type, address: usize) T {
const addr = Self.mirror(address);
return switch (T) {
u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
else => @compileError("VRAM: Unsupported read width"),
};
}
pub fn write(self: *Self, comptime T: type, dispcnt: io.DisplayControl, address: usize, value: T) void {
const mode: u3 = dispcnt.bg_mode.read();
const idx = Self.mirror(address);
switch (T) {
u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[idx..][0..@sizeOf(T)], value),
u8 => {
// Ignore write if it falls within the boundaries of OBJ VRAM
switch (mode) {
0, 1, 2 => if (0x0001_0000 <= idx) return,
else => if (0x0001_4000 <= idx) return,
}
const align_idx = idx & ~@as(u32, 1); // Aligned to a halfword boundary
std.mem.writeIntSliceLittle(u16, self.buf[align_idx..][0..@sizeOf(u16)], @as(u16, value) * 0x101);
},
else => @compileError("VRAM: Unsupported write width"),
}
}
pub fn init(allocator: Allocator) !Self {
const buf = try allocator.alloc(u8, buf_len);
std.mem.set(u8, buf, 0);
return Self{ .buf = buf, .allocator = allocator };
}
pub fn deinit(self: *Self) void {
self.allocator.free(self.buf);
self.* = undefined;
}
fn mirror(address: usize) usize {
// Mirrored in steps of 128K (64K + 32K + 32K) (abcc)
const addr = address & 0x1FFFF;
// If the address is within 96K we don't do anything,
// otherwise we want to mirror the last 32K (addresses between 64K and 96K)
return if (addr < buf_len) addr else 0x10000 + (addr & 0x7FFF);
}