Compare commits

...

13 Commits

Author SHA1 Message Date
Rekai Nyangadzayi Musuka f715585867 chore: dont allocate not-small ?Sprite array on stack
use memset like most other allocations in this emu
2022-10-23 04:46:07 -03:00
Rekai Nyangadzayi Musuka cab40efc59 chore: move FrameBuffer struct to util.zig 2022-10-23 04:46:07 -03:00
Rekai Nyangadzayi Musuka a17300a8e0 chore: move OAM, PALRAM and VRAM structs to separate files 2022-10-23 04:46:07 -03:00
Rekai Nyangadzayi Musuka 2ebe1c0b0e fix: 8-bit writes to WIN PPU registers
Advance Wars depends on these registers similar to Mario Kart's 8-bit
writes to Affine Background registers:
2022-10-23 04:46:07 -03:00
Rekai Nyangadzayi Musuka 6db70638fe chore: refactor window 2022-10-23 04:46:07 -03:00
Rekai Nyangadzayi Musuka bc5ab5810a chore: crude background window impl (no affine) 2022-10-23 04:46:07 -03:00
Rekai Nyangadzayi Musuka decf2a01c9 chore: rename function (misspelt until now somehow) 2022-10-23 04:46:07 -03:00
Rekai Nyangadzayi Musuka 4b8ed3cebb fix(io): resolve embarrasingly simple regression
introduced in 21eddac31e
2022-10-23 04:39:31 -03:00
Rekai Nyangadzayi Musuka 928ce674d9 fix(cpu): fix obscure LDRSH behaviour 2022-10-22 22:12:41 -03:00
Rekai Nyangadzayi Musuka 945dbec013 fix(open-bus): don't rotate result
Rotating misaligned reads is the responsibility of the CPU
2022-10-22 21:32:36 -03:00
Rekai Nyangadzayi Musuka dd98066a34 Merge pull request 'feat(dma): Implement DMA Latch' (#5) from dma-latch into main
Reviewed-on: #5
2022-10-22 23:53:21 +00:00
Rekai Nyangadzayi Musuka a2868dfe9e feat(dma): Implement DMA Latch 2022-10-22 20:52:02 -03:00
Rekai Nyangadzayi Musuka 22979d9450 fix(bios): fix regression
was reading addr_latch + 8, which is a remnant from when I was faking
the pipeline
2022-10-22 15:33:36 -03:00
11 changed files with 384 additions and 299 deletions

View File

@@ -169,7 +169,7 @@ fn openBus(self: *const Self, comptime T: type, address: u32) T {
}
};
-return @truncate(T, rotr(u32, word, 8 * (address & 3)));
+return @truncate(T, word);
}
pub fn read(self: *Self, comptime T: type, address: u32) T {
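The openBus change above is the other half of commit 945dbec013 ("don't rotate result"): the bus now returns the raw aligned word, and rotating a misaligned load is left to the CPU core. A minimal sketch of that convention, using std.math.rotr in place of this repo's util rotr; the helper name ldrResult is hypothetical and not part of the diff:

const std = @import("std");

// Hypothetical helper (not from this diff): on the ARM7TDMI a misaligned LDR
// yields the aligned word rotated right by 8 bits per unaligned byte, so the
// rotation belongs in the CPU's load path rather than in openBus.
fn ldrResult(word: u32, address: u32) u32 {
    return std.math.rotr(u32, word, 8 * (address & 3));
}

test "misaligned LDR rotates the fetched word" {
    // The word 0x11223344 read through address 0x0300_0001 is seen as 0x44112233.
    try std.testing.expectEqual(@as(u32, 0x44112233), ldrResult(0x11223344, 0x0300_0001));
}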

View File

@@ -19,7 +19,7 @@ pub fn read(self: *Self, comptime T: type, r15: u32, addr: u32) T {
}
log.debug("Rejected read since r15=0x{X:0>8}", .{r15});
-return @truncate(T, self._read(T, self.addr_latch + 8));
+return @truncate(T, self._read(T, self.addr_latch));
}
pub fn dbgRead(self: *const Self, comptime T: type, r15: u32, addr: u32) T {

View File

@@ -11,6 +11,8 @@ const log = std.log.scoped(.DmaTransfer);
const setHi = util.setHi;
const setLo = util.setLo;
+const rotr = @import("../../util.zig").rotr;
pub fn create() DmaTuple {
return .{ DmaController(0).init(), DmaController(1).init(), DmaController(2).init(), DmaController(3).init() };
}
@@ -99,6 +101,7 @@ fn DmaController(comptime id: u2) type {
const sad_mask: u32 = if (id == 0) 0x07FF_FFFF else 0x0FFF_FFFF;
const dad_mask: u32 = if (id != 3) 0x07FF_FFFF else 0x0FFF_FFFF;
+const WordCount = if (id == 3) u16 else u14;
/// Write-only. The first address in a DMA transfer. (DMASAD)
/// Note: use writeSrc instead of manipulating src_addr directly
@@ -107,17 +110,19 @@ fn DmaController(comptime id: u2) type {
/// Note: Use writeDst instead of manipulatig dst_addr directly
dad: u32,
/// Write-only. The Word Count for the DMA Transfer (DMACNT_L)
-word_count: if (id == 3) u16 else u14,
+word_count: WordCount,
/// Read / Write. DMACNT_H
/// Note: Use writeControl instead of manipulating cnt directly.
cnt: DmaControl,
+/// Internal. The last successfully read value
+data_latch: u32,
/// Internal. Currrent Source Address
sad_latch: u32,
/// Internal. Current Destination Address
dad_latch: u32,
/// Internal. Word Count
-_word_count: if (id == 3) u16 else u14,
+_word_count: WordCount,
/// Some DMA Transfers are enabled during Hblank / VBlank and / or
/// have delays. Thefore bit 15 of DMACNT isn't actually something
@@ -134,6 +139,8 @@ fn DmaController(comptime id: u2) type {
// Internals
.sad_latch = 0,
.dad_latch = 0,
+.data_latch = 0,
._word_count = 0,
.in_progress = false,
};
@@ -158,7 +165,7 @@ fn DmaController(comptime id: u2) type {
// Reload Internals on Rising Edge.
self.sad_latch = self.sad;
self.dad_latch = self.dad;
-self._word_count = if (self.word_count == 0) std.math.maxInt(@TypeOf(self._word_count)) else self.word_count;
+self._word_count = if (self.word_count == 0) std.math.maxInt(WordCount) else self.word_count;
// Only a Start Timing of 00 has a DMA Transfer immediately begin
self.in_progress = new.start_timing.read() == 0b00;
@@ -181,11 +188,19 @@ fn DmaController(comptime id: u2) type {
const offset: u32 = if (transfer_type) @sizeOf(u32) else @sizeOf(u16);
const mask = if (transfer_type) ~@as(u32, 3) else ~@as(u32, 1);
+const sad_addr = self.sad_latch & mask;
+const dad_addr = self.dad_latch & mask;
if (transfer_type) {
-cpu.bus.write(u32, self.dad_latch & mask, cpu.bus.read(u32, self.sad_latch & mask));
+if (sad_addr >= 0x0200_0000) self.data_latch = cpu.bus.read(u32, sad_addr);
+cpu.bus.write(u32, dad_addr, self.data_latch);
} else {
-cpu.bus.write(u16, self.dad_latch & mask, cpu.bus.read(u16, self.sad_latch & mask));
+if (sad_addr >= 0x0200_0000) {
+const value: u32 = cpu.bus.read(u16, sad_addr);
+self.data_latch = value << 16 | value;
+}
+cpu.bus.write(u16, dad_addr, @truncate(u16, rotr(u32, self.data_latch, 8 * (dad_addr & 3))));
}
switch (sad_adj) {
@@ -266,7 +281,7 @@ fn DmaController(comptime id: u2) type {
};
}
-pub fn pollDmaOnBlank(bus: *Bus, comptime kind: DmaKind) void {
+pub fn onBlanking(bus: *Bus, comptime kind: DmaKind) void {
bus.dma[0].poll(kind);
bus.dma[1].poll(kind);
bus.dma[2].poll(kind);
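Worked example of the latch added above (values are illustrative, not from the diff): a 16-bit transfer that reads 0xBEEF mirrors it into both halves of data_latch, so the destination's byte alignment can pick a half without losing data, while a source below 0x0200_0000 skips the read entirely and reuses whatever the latch already holds (DMA open bus). A self-contained sketch, with std.math.rotr standing in for util's rotr:

const std = @import("std");

// Sketch of the 16-bit path: mirror the halfword across the 32-bit latch,
// then let the destination address select which half a 16-bit write emits.
fn latchHalfword(value: u16) u32 {
    return @as(u32, value) << 16 | value;
}

fn halfFromLatch(latch: u32, dad_addr: u32) u16 {
    return @truncate(u16, std.math.rotr(u32, latch, 8 * (dad_addr & 3)));
}

test "a latched halfword survives either destination alignment" {
    const latch = latchHalfword(0xBEEF);
    try std.testing.expectEqual(@as(u16, 0xBEEF), halfFromLatch(latch, 0x0300_0000));
    try std.testing.expectEqual(@as(u16, 0xBEEF), halfFromLatch(latch, 0x0300_0002));
}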

View File

@@ -11,8 +11,8 @@ const Bus = @import("../Bus.zig");
const DmaController = @import("dma.zig").DmaController;
const Scheduler = @import("../scheduler.zig").Scheduler;
-const setHi = util.setLo;
-const setLo = util.setHi;
+const setHi = util.setHi;
+const setLo = util.setLo;
const log = std.log.scoped(.@"I/O");
@@ -305,6 +305,14 @@ pub fn write(bus: *Bus, comptime T: type, address: u32, value: T) void {
0x0400_0009 => bus.ppu.bg[0].cnt.raw = setHi(u16, bus.ppu.bg[0].cnt.raw, value),
0x0400_000A => bus.ppu.bg[1].cnt.raw = setLo(u16, bus.ppu.bg[1].cnt.raw, value),
0x0400_000B => bus.ppu.bg[1].cnt.raw = setHi(u16, bus.ppu.bg[1].cnt.raw, value),
+0x0400_0040 => bus.ppu.win.h[0].raw = setLo(u16, bus.ppu.win.h[0].raw, value),
+0x0400_0041 => bus.ppu.win.h[0].raw = setHi(u16, bus.ppu.win.h[0].raw, value),
+0x0400_0042 => bus.ppu.win.h[1].raw = setLo(u16, bus.ppu.win.h[1].raw, value),
+0x0400_0043 => bus.ppu.win.h[1].raw = setHi(u16, bus.ppu.win.h[1].raw, value),
+0x0400_0044 => bus.ppu.win.v[0].raw = setLo(u16, bus.ppu.win.v[0].raw, value),
+0x0400_0045 => bus.ppu.win.v[0].raw = setHi(u16, bus.ppu.win.v[0].raw, value),
+0x0400_0046 => bus.ppu.win.v[1].raw = setLo(u16, bus.ppu.win.v[1].raw, value),
+0x0400_0047 => bus.ppu.win.v[1].raw = setHi(u16, bus.ppu.win.v[1].raw, value),
0x0400_0048 => bus.ppu.win.in.raw = setLo(u16, bus.ppu.win.in.raw, value),
0x0400_0049 => bus.ppu.win.in.raw = setHi(u16, bus.ppu.win.in.raw, value),
0x0400_004A => bus.ppu.win.out.raw = setLo(u16, bus.ppu.win.out.raw, value),
@@ -462,8 +470,12 @@ pub const BldY = extern union {
raw: u16,
};
+const u8WriteKind = enum { Hi, Lo };
/// Write-only
pub const WinH = extern union {
+const Self = @This();
x2: Bitfield(u16, 0, 8),
x1: Bitfield(u16, 8, 8),
raw: u16,
@@ -471,28 +483,37 @@ pub const WinH = extern union {
/// Write-only
pub const WinV = extern union {
+const Self = @This();
y2: Bitfield(u16, 0, 8),
y1: Bitfield(u16, 8, 8),
raw: u16,
+pub fn set(self: *Self, comptime K: u8WriteKind, value: u8) void {
+self.raw = switch (K) {
+.Hi => (@as(u16, value) << 8) | self.raw & 0xFF,
+.Lo => (self.raw & 0xFF00) | value,
+};
+}
};
pub const WinIn = extern union {
w0_bg: Bitfield(u16, 0, 4),
w0_obj: Bit(u16, 4),
-w0_colour: Bit(u16, 5),
+w0_bld: Bit(u16, 5),
w1_bg: Bitfield(u16, 8, 4),
w1_obj: Bit(u16, 12),
-w1_colour: Bit(u16, 13),
+w1_bld: Bit(u16, 13),
raw: u16,
};
pub const WinOut = extern union {
out_bg: Bitfield(u16, 0, 4),
out_obj: Bit(u16, 4),
-out_colour: Bit(u16, 5),
+out_bld: Bit(u16, 5),
obj_bg: Bitfield(u16, 8, 4),
obj_obj: Bit(u16, 12),
-obj_colour: Bit(u16, 13),
+obj_bld: Bit(u16, 13),
raw: u16,
};

View File

@@ -35,11 +35,8 @@ pub fn halfAndSignedDataTransfer(comptime P: bool, comptime U: bool, comptime I:
},
0b11 => {
// LDRSH
-result = if (address & 1 == 1) blk: {
-break :blk sext(u32, u8, bus.read(u8, address));
-} else blk: {
-break :blk sext(u32, u16, bus.read(u16, address));
-};
+const value = bus.read(u16, address);
+result = if (address & 1 == 1) sext(u32, u8, @truncate(u8, value >> 8)) else sext(u32, u16, value);
},
0b00 => unreachable, // SWP
}

View File

@@ -46,11 +46,8 @@ pub fn fmt78(comptime op: u2, comptime T: bool) InstrFn {
},
0b11 => {
// LDRSH
-cpu.r[rd] = if (address & 1 == 1) blk: {
-break :blk sext(u32, u8, bus.read(u8, address));
-} else blk: {
-break :blk sext(u32, u16, bus.read(u16, address));
-};
+const value = bus.read(u16, address);
+cpu.r[rd] = if (address & 1 == 1) sext(u32, u8, @truncate(u8, value >> 8)) else sext(u32, u16, value);
},
}
} else {
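The two LDRSH hunks above (ARM and THUMB) encode the quirk behind commit 928ce674d9: a signed halfword load from an odd address effectively becomes a signed byte load of the addressed byte. A worked example with illustrative values, using the same old-style builtins as the surrounding code:

const std = @import("std");

test "LDRSH from an odd address sign-extends the addressed byte" {
    // Say the aligned halfword at 0x0300_0000 is 0x80FF (0xFF at +0, 0x80 at +1).
    // LDRSH from 0x0300_0001 therefore loads 0x80 and sign-extends it to
    // 0xFFFF_FF80 instead of returning the halfword 0x80FF.
    const halfword: u16 = 0x80FF;
    const byte = @truncate(u8, halfword >> 8);
    const result = @bitCast(u32, @as(i32, @bitCast(i8, byte)));
    try std.testing.expectEqual(@as(u32, 0xFFFF_FF80), result);
}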

View File

@@ -1,16 +1,19 @@
const std = @import("std");
const io = @import("bus/io.zig");
-const Bit = @import("bitfield").Bit;
-const Bitfield = @import("bitfield").Bitfield;
+const dma = @import("bus/dma.zig");
+const Oam = @import("ppu/Oam.zig");
+const Palette = @import("ppu/Palette.zig");
+const Vram = @import("ppu/Vram.zig");
const EventKind = @import("scheduler.zig").EventKind;
const Scheduler = @import("scheduler.zig").Scheduler;
const Arm7tdmi = @import("cpu.zig").Arm7tdmi;
+const FrameBuffer = @import("../util.zig").FrameBuffer;
+const Bit = @import("bitfield").Bit;
+const Bitfield = @import("bitfield").Bitfield;
const Allocator = std.mem.Allocator;
-const log = std.log.scoped(.PPU);
-const pollDmaOnBlank = @import("bus/dma.zig").pollDmaOnBlank;
+const log = std.log.scoped(.Ppu);
pub const width = 240;
pub const height = 160;
@@ -48,14 +51,14 @@ pub const Ppu = struct {
sched.push(.Draw, 240 * 4);
const sprites = try allocator.create([128]?Sprite);
-sprites.* = [_]?Sprite{null} ** 128;
+std.mem.set(?Sprite, sprites, null);
return Self{
.vram = try Vram.init(allocator),
.palette = try Palette.init(allocator),
.oam = try Oam.init(allocator),
.sched = sched,
-.framebuf = try FrameBuffer.init(allocator),
+.framebuf = try FrameBuffer.init(allocator, framebuf_pitch * height),
.allocator = allocator,
// Registers
@@ -274,16 +277,17 @@ pub const Ppu = struct {
aff_x += self.aff_bg[n - 2].pa;
aff_y += self.aff_bg[n - 2].pc;
-if (!shouldDrawBackground(n, self.bldcnt, &self.scanline, i)) continue;
+const x = @bitCast(u32, ix);
+const y = @bitCast(u32, iy);
+const win_bounds = self.windowBounds(@truncate(u9, x), @truncate(u8, y));
+if (!shouldDrawBackground(self, n, win_bounds, i)) continue;
if (self.bg[n].cnt.display_overflow.read()) {
ix = if (ix > px_width) @rem(ix, px_width) else if (ix < 0) px_width + @rem(ix, px_width) else ix;
iy = if (iy > px_height) @rem(iy, px_height) else if (iy < 0) px_height + @rem(iy, px_height) else iy;
} else if (ix > px_width or iy > px_height or ix < 0 or iy < 0) continue;
-const x = @bitCast(u32, ix);
-const y = @bitCast(u32, iy);
const tile_id: u32 = self.vram.read(u8, screen_base + ((y / 8) * @bitCast(u32, tile_width) + (x / 8)));
const row = y & 7;
const col = x & 7;
@@ -293,7 +297,7 @@ pub const Ppu = struct {
if (pal_id != 0) {
const bgr555 = self.palette.read(u16, pal_id * 2);
-copyToBackgroundBuffer(n, self.bldcnt, &self.scanline, i, bgr555);
+self.copyToBackgroundBuffer(n, win_bounds, i, bgr555);
}
}
@@ -302,7 +306,7 @@ pub const Ppu = struct {
self.aff_bg[n - 2].y_latch.? += self.aff_bg[n - 2].pd; // PD is added to BGxY
}
-fn drawBackround(self: *Self, comptime n: u2) void {
+fn drawBackground(self: *Self, comptime n: u2) void {
// A Tile in a charblock is a byte, while a Screen Entry is a halfword
const char_base = 0x4000 * @as(u32, self.bg[n].cnt.char_base.read());
@@ -322,10 +326,11 @@ pub const Ppu = struct {
var i: u32 = 0;
while (i < width) : (i += 1) {
-if (!shouldDrawBackground(n, self.bldcnt, &self.scanline, i)) continue;
const x = hofs + i;
+const win_bounds = self.windowBounds(@truncate(u9, x), @truncate(u8, y));
+if (!shouldDrawBackground(self, n, win_bounds, i)) continue;
// Grab the Screen Entry from VRAM
const entry_addr = screen_base + tilemapOffset(size, x, y);
const entry = @bitCast(ScreenEntry, self.vram.read(u16, entry_addr));
@@ -350,7 +355,7 @@ pub const Ppu = struct {
if (pal_id != 0) {
const bgr555 = self.palette.read(u16, pal_id * 2);
-copyToBackgroundBuffer(n, self.bldcnt, &self.scanline, i, bgr555);
+self.copyToBackgroundBuffer(n, win_bounds, i, bgr555);
}
}
}
@@ -376,10 +381,10 @@ pub const Ppu = struct {
var layer: usize = 0;
while (layer < 4) : (layer += 1) {
self.drawSprites(@truncate(u2, layer));
-if (layer == self.bg[0].cnt.priority.read() and bg_enable & 1 == 1) self.drawBackround(0);
-if (layer == self.bg[1].cnt.priority.read() and bg_enable >> 1 & 1 == 1) self.drawBackround(1);
-if (layer == self.bg[2].cnt.priority.read() and bg_enable >> 2 & 1 == 1) self.drawBackround(2);
-if (layer == self.bg[3].cnt.priority.read() and bg_enable >> 3 & 1 == 1) self.drawBackround(3);
+if (layer == self.bg[0].cnt.priority.read() and bg_enable & 1 == 1) self.drawBackground(0);
+if (layer == self.bg[1].cnt.priority.read() and bg_enable >> 1 & 1 == 1) self.drawBackground(1);
+if (layer == self.bg[2].cnt.priority.read() and bg_enable >> 2 & 1 == 1) self.drawBackground(2);
+if (layer == self.bg[3].cnt.priority.read() and bg_enable >> 3 & 1 == 1) self.drawBackground(3);
}
// Copy Drawn Scanline to Frame Buffer
@@ -404,8 +409,8 @@ pub const Ppu = struct {
var layer: usize = 0;
while (layer < 4) : (layer += 1) {
self.drawSprites(@truncate(u2, layer));
-if (layer == self.bg[0].cnt.priority.read() and bg_enable & 1 == 1) self.drawBackround(0);
-if (layer == self.bg[1].cnt.priority.read() and bg_enable >> 1 & 1 == 1) self.drawBackround(1);
+if (layer == self.bg[0].cnt.priority.read() and bg_enable & 1 == 1) self.drawBackground(0);
+if (layer == self.bg[1].cnt.priority.read() and bg_enable >> 1 & 1 == 1) self.drawBackground(1);
if (layer == self.bg[2].cnt.priority.read() and bg_enable >> 2 & 1 == 1) self.drawAffineBackground(2);
}
@@ -483,7 +488,7 @@ pub const Ppu = struct {
while (i < width) : (i += 1) {
// If we're outside of the bounds of mode 5, draw the background colour
const bgr555 =
-if (scanline < m5_height and i < m5_width) self.vram.read(u16, vram_base + i * @sizeOf(u16)) else self.palette.getBackdrop();
+if (scanline < m5_height and i < m5_width) self.vram.read(u16, vram_base + i * @sizeOf(u16)) else self.palette.backdrop();
std.mem.writeIntNative(u32, self.framebuf.get(.Emulator)[fb_base + i * @sizeOf(u32) ..][0..@sizeOf(u32)], rgba888(bgr555));
}
@@ -527,7 +532,94 @@ pub const Ppu = struct {
}
if (maybe_top) |top| return top;
-return self.palette.getBackdrop();
+return self.palette.backdrop();
}
fn copyToBackgroundBuffer(self: *Self, comptime n: u2, bounds: ?WindowBounds, i: usize, bgr555: u16) void {
if (self.bldcnt.mode.read() != 0b00) {
// Standard Alpha Blending
const a_layers = self.bldcnt.layer_a.read();
const is_blend_enabled = (a_layers >> n) & 1 == 1;
// If Alpha Blending is enabled and we've found an eligible layer for
// Pixel A, store the pixel in the bottom pixel buffer
const win_part = if (bounds) |win| blk: {
// Window Enabled
break :blk switch (win) {
.win0 => self.win.in.w0_bld.read(),
.win1 => self.win.in.w1_bld.read(),
.out => self.win.out.out_bld.read(),
};
} else true;
if (win_part and is_blend_enabled) {
self.scanline.btm()[i] = bgr555;
return;
}
}
self.scanline.top()[i] = bgr555;
}
const WindowBounds = enum { win0, win1, out };
fn windowBounds(self: *Self, x: u9, y: u8) ?WindowBounds {
const win0 = self.dispcnt.win_enable.read() & 1 == 1;
const win1 = (self.dispcnt.win_enable.read() >> 1) & 1 == 1;
const winObj = self.dispcnt.obj_win_enable.read();
if (!(win0 or win1 or winObj)) return null;
if (win0 and self.win.inRange(0, x, y)) return .win0;
if (win1 and self.win.inRange(1, x, y)) return .win1;
return .out;
}
fn shouldDrawBackground(self: *Self, comptime n: u2, bounds: ?WindowBounds, i: usize) bool {
// If a pixel has been drawn on the top layer, it's because:
// 1. The pixel is to be blended with a pixel on the bottom layer
// 2. The pixel is not to be blended at all
// Also, if we find a pixel on the top layer we don't need to bother with this I think?
if (self.scanline.top()[i] != null) return false;
if (bounds) |win| {
switch (win) {
.win0 => if ((self.win.in.w0_bg.read() >> n) & 1 == 0) return false,
.win1 => if ((self.win.in.w1_bg.read() >> n) & 1 == 0) return false,
.out => if ((self.win.out.out_bg.read() >> n) & 1 == 0) return false,
}
}
if (self.scanline.btm()[i] != null) {
// The pixel found in the bottom layer is:
// 1. From a higher priority background
// 2. From a background that is marked for blending (Pixel A)
// If Alpha Blending isn't enabled, then we've already found a higher prio
// pixel, we can return early
if (self.bldcnt.mode.read() != 0b01) return false;
const b_layers = self.bldcnt.layer_b.read();
const win_part = if (bounds) |win| blk: {
// Window Enabled
break :blk switch (win) {
.win0 => self.win.in.w0_bld.read(),
.win1 => self.win.in.w1_bld.read(),
.out => self.win.out.out_bld.read(),
};
} else true;
// If the Background is not marked for blending, we've already found
// a higher priority pixel, move on.
const is_blend_enabled = win_part and ((b_layers >> n) & 1 == 1);
if (!is_blend_enabled) return false;
}
return true;
}
// TODO: Comment this + get a better understanding
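The precedence encoded by windowBounds above: WIN0 is tested before WIN1, so WIN0 wins where the two overlap; a pixel inside no enabled window falls back to WINOUT; and with no window enabled the function returns null and no clipping is applied (the OBJ window only participates in the enable check for now). A standalone restatement of that rule, decoupled from Ppu state; the free-function form and names are illustrative only:

const std = @import("std");

const WindowBounds = enum { win0, win1, out };

// Illustrative only: same precedence as Ppu.windowBounds, minus the OBJ window.
fn resolveWindow(win0_enabled: bool, win1_enabled: bool, in_win0: bool, in_win1: bool) ?WindowBounds {
    if (!(win0_enabled or win1_enabled)) return null; // no clipping at all
    if (win0_enabled and in_win0) return .win0; // WIN0 beats WIN1 on overlap
    if (win1_enabled and in_win1) return .win1;
    return .out; // inside no enabled window -> WINOUT settings apply
}

test "overlapping windows resolve to WIN0" {
    try std.testing.expectEqual(@as(?WindowBounds, .win0), resolveWindow(true, true, true, true));
}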
@@ -569,7 +661,7 @@
// See if HBlank DMA is present and not enabled
if (!self.dispstat.vblank.read())
-pollDmaOnBlank(cpu.bus, .HBlank);
+dma.onBlanking(cpu.bus, .HBlank);
self.dispstat.hblank.set();
self.sched.push(.HBlank, 68 * 4 -| late);
@@ -611,7 +703,7 @@
self.aff_bg[1].latchRefPoints();
// See if Vblank DMA is present and not enabled
-pollDmaOnBlank(cpu.bus, .VBlank);
+dma.onBlanking(cpu.bus, .VBlank);
}
if (scanline == 227) self.dispstat.vblank.unset();
@@ -620,158 +712,6 @@
}
};
const Palette = struct {
const palram_size = 0x400;
const Self = @This();
buf: []u8,
allocator: Allocator,
fn init(allocator: Allocator) !Self {
const buf = try allocator.alloc(u8, palram_size);
std.mem.set(u8, buf, 0);
return Self{
.buf = buf,
.allocator = allocator,
};
}
fn deinit(self: *Self) void {
self.allocator.free(self.buf);
self.* = undefined;
}
pub fn read(self: *const Self, comptime T: type, address: usize) T {
const addr = address & 0x3FF;
return switch (T) {
u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
else => @compileError("PALRAM: Unsupported read width"),
};
}
pub fn write(self: *Self, comptime T: type, address: usize, value: T) void {
const addr = address & 0x3FF;
switch (T) {
u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)], value),
u8 => {
const align_addr = addr & ~@as(u32, 1); // Aligned to Halfword boundary
std.mem.writeIntSliceLittle(u16, self.buf[align_addr..][0..@sizeOf(u16)], @as(u16, value) * 0x101);
},
else => @compileError("PALRAM: Unsupported write width"),
}
}
fn getBackdrop(self: *const Self) u16 {
return self.read(u16, 0);
}
};
const Vram = struct {
const vram_size = 0x18000;
const Self = @This();
buf: []u8,
allocator: Allocator,
fn init(allocator: Allocator) !Self {
const buf = try allocator.alloc(u8, vram_size);
std.mem.set(u8, buf, 0);
return Self{
.buf = buf,
.allocator = allocator,
};
}
fn deinit(self: *Self) void {
self.allocator.free(self.buf);
self.* = undefined;
}
pub fn read(self: *const Self, comptime T: type, address: usize) T {
const addr = Self.mirror(address);
return switch (T) {
u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
else => @compileError("VRAM: Unsupported read width"),
};
}
pub fn write(self: *Self, comptime T: type, dispcnt: io.DisplayControl, address: usize, value: T) void {
const mode: u3 = dispcnt.bg_mode.read();
const idx = Self.mirror(address);
switch (T) {
u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[idx..][0..@sizeOf(T)], value),
u8 => {
// Ignore write if it falls within the boundaries of OBJ VRAM
switch (mode) {
0, 1, 2 => if (0x0001_0000 <= idx) return,
else => if (0x0001_4000 <= idx) return,
}
const align_idx = idx & ~@as(u32, 1); // Aligned to a halfword boundary
std.mem.writeIntSliceLittle(u16, self.buf[align_idx..][0..@sizeOf(u16)], @as(u16, value) * 0x101);
},
else => @compileError("VRAM: Unsupported write width"),
}
}
fn mirror(address: usize) usize {
// Mirrored in steps of 128K (64K + 32K + 32K) (abcc)
const addr = address & 0x1FFFF;
// If the address is within 96K we don't do anything,
// otherwise we want to mirror the last 32K (addresses between 64K and 96K)
return if (addr < vram_size) addr else 0x10000 + (addr & 0x7FFF);
}
};
const Oam = struct {
const oam_size = 0x400;
const Self = @This();
buf: []u8,
allocator: Allocator,
fn init(allocator: Allocator) !Self {
const buf = try allocator.alloc(u8, oam_size);
std.mem.set(u8, buf, 0);
return Self{
.buf = buf,
.allocator = allocator,
};
}
fn deinit(self: *Self) void {
self.allocator.free(self.buf);
self.* = undefined;
}
pub fn read(self: *const Self, comptime T: type, address: usize) T {
const addr = address & 0x3FF;
return switch (T) {
u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
else => @compileError("OAM: Unsupported read width"),
};
}
pub fn write(self: *Self, comptime T: type, address: usize, value: T) void {
const addr = address & 0x3FF;
switch (T) {
u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)], value),
u8 => return, // 8-bit writes are explicitly ignored
else => @compileError("OAM: Unsupported write width"),
}
}
};
const Window = struct {
const Self = @This();
@@ -791,6 +731,25 @@
};
}
fn inRange(self: *const Self, comptime id: u1, x: u9, y: u8) bool {
const h = self.h[id];
const v = self.v[id];
const y1 = v.y1.read();
const y2 = if (y1 > v.y2.read()) 160 else std.math.min(160, v.y2.read());
if (y1 <= y and y < y2) {
// Within Y bounds
const x1 = h.x1.read();
const x2 = if (x1 > h.x2.read()) 240 else std.math.min(240, h.x2.read());
// Within X Bounds
return x1 <= x and x < x2;
}
return false;
}
pub fn setH(self: *Self, value: u32) void {
self.h[0].raw = @truncate(u16, value);
self.h[1].raw = @truncate(u16, value >> 16);
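The inRange helper above treats an inverted coordinate pair as "run to the edge of the screen": if x1 > x2 the right bound becomes 240 (and 160 vertically), otherwise the bound is clamped to the screen size. A standalone restatement of the horizontal rule with illustrative values, not taken from the diff:

const std = @import("std");

// Illustrative restatement of the X-bound handling in Window.inRange.
fn horizontalBounds(x1: u9, x2: u9) [2]u9 {
    const right = if (x1 > x2) 240 else std.math.min(240, x2);
    return .{ x1, right };
}

test "an inverted WINH pair extends the window to the screen edge" {
    const bounds = horizontalBounds(200, 40); // x1 > x2
    try std.testing.expectEqual(@as(u9, 200), bounds[0]);
    try std.testing.expectEqual(@as(u9, 240), bounds[1]);
}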
@@ -1087,37 +1046,6 @@ fn alphaBlend(top: u16, btm: u16, bldalpha: io.BldAlpha) u16 {
return (bld_b << 10) | (bld_g << 5) | bld_r;
}
fn shouldDrawBackground(comptime n: u2, bldcnt: io.BldCnt, scanline: *Scanline, i: usize) bool {
// If a pixel has been drawn on the top layer, it's because
// Either the pixel is to be blended with a pixel on the bottom layer
// or the pixel is not to be blended at all
// Consequentially, if we find a pixel on the top layer, there's no need
// to render anything I think?
if (scanline.top()[i] != null) return false;
if (scanline.btm()[i] != null) {
// The Pixel found in the Bottom layer is
// 1. From a higher priority
// 2. From a Backround that is marked for Blending (Pixel A)
//
// We now have to confirm whether this current Background can be used
// as Pixel B or not.
// If Alpha Blending isn't enabled, we've aready found a higher
// priority pixel to render. Move on
if (bldcnt.mode.read() != 0b01) return false;
const b_layers = bldcnt.layer_b.read();
const is_blend_enabled = (b_layers >> n) & 1 == 1;
// If the Background is not marked for blending, we've already found
// a higher priority pixel, move on.
if (!is_blend_enabled) return false;
}
return true;
}
fn shouldDrawSprite(bldcnt: io.BldCnt, scanline: *Scanline, x: u9) bool {
if (scanline.top()[x] != null) return false;
@@ -1132,23 +1060,6 @@ fn shouldDrawSprite(bldcnt: io.BldCnt, scanline: *Scanline, x: u9) bool {
return true;
}
fn copyToBackgroundBuffer(comptime n: u2, bldcnt: io.BldCnt, scanline: *Scanline, i: usize, bgr555: u16) void {
if (bldcnt.mode.read() != 0b00) {
// Standard Alpha Blending
const a_layers = bldcnt.layer_a.read();
const is_blend_enabled = (a_layers >> n) & 1 == 1;
// If Alpha Blending is enabled and we've found an eligible layer for
// Pixel A, store the pixel in the bottom pixel buffer
if (is_blend_enabled) {
scanline.btm()[i] = bgr555;
return;
}
}
scanline.top()[i] = bgr555;
}
fn copyToSpriteBuffer(bldcnt: io.BldCnt, scanline: *Scanline, x: u9, bgr555: u16) void {
if (bldcnt.mode.read() != 0b00) {
// Alpha Blending
@@ -1201,48 +1112,3 @@ const Scanline = struct {
return self.layers[1];
}
};
// Double Buffering Implementation
const FrameBuffer = struct {
const Self = @This();
layers: [2][]u8,
buf: []u8,
current: u1,
allocator: Allocator,
// TODO: Rename
const Device = enum {
Emulator,
Renderer,
};
pub fn init(allocator: Allocator) !Self {
const framebuf_len = framebuf_pitch * height;
const buf = try allocator.alloc(u8, framebuf_len * 2);
std.mem.set(u8, buf, 0);
return .{
// Front and Back Framebuffers
.layers = [_][]u8{ buf[0..][0..framebuf_len], buf[framebuf_len..][0..framebuf_len] },
.buf = buf,
.current = 0,
.allocator = allocator,
};
}
fn deinit(self: *Self) void {
self.allocator.free(self.buf);
self.* = undefined;
}
pub fn swap(self: *Self) void {
self.current = ~self.current;
}
pub fn get(self: *Self, comptime dev: Device) []u8 {
return self.layers[if (dev == .Emulator) self.current else ~self.current];
}
};

src/core/ppu/Oam.zig (new file, +40 lines)

@@ -0,0 +1,40 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const buf_len = 0x400;
const Self = @This();
buf: []u8,
allocator: Allocator,
pub fn read(self: *const Self, comptime T: type, address: usize) T {
const addr = address & 0x3FF;
return switch (T) {
u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
else => @compileError("OAM: Unsupported read width"),
};
}
pub fn write(self: *Self, comptime T: type, address: usize, value: T) void {
const addr = address & 0x3FF;
switch (T) {
u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)], value),
u8 => return, // 8-bit writes are explicitly ignored
else => @compileError("OAM: Unsupported write width"),
}
}
pub fn init(allocator: Allocator) !Self {
const buf = try allocator.alloc(u8, buf_len);
std.mem.set(u8, buf, 0);
return Self{ .buf = buf, .allocator = allocator };
}
pub fn deinit(self: *Self) void {
self.allocator.free(self.buf);
self.* = undefined;
}

src/core/ppu/Palette.zig (new file, +47 lines)

@@ -0,0 +1,47 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const buf_len = 0x400;
const Self = @This();
buf: []u8,
allocator: Allocator,
pub fn read(self: *const Self, comptime T: type, address: usize) T {
const addr = address & 0x3FF;
return switch (T) {
u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
else => @compileError("PALRAM: Unsupported read width"),
};
}
pub fn write(self: *Self, comptime T: type, address: usize, value: T) void {
const addr = address & 0x3FF;
switch (T) {
u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)], value),
u8 => {
const align_addr = addr & ~@as(u32, 1); // Aligned to Halfword boundary
std.mem.writeIntSliceLittle(u16, self.buf[align_addr..][0..@sizeOf(u16)], @as(u16, value) * 0x101);
},
else => @compileError("PALRAM: Unsupported write width"),
}
}
pub fn init(allocator: Allocator) !Self {
const buf = try allocator.alloc(u8, buf_len);
std.mem.set(u8, buf, 0);
return Self{ .buf = buf, .allocator = allocator };
}
pub fn deinit(self: *Self) void {
self.allocator.free(self.buf);
self.* = undefined;
}
pub fn backdrop(self: *const Self) u16 {
return self.read(u16, 0);
}
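One behaviour worth noting in the relocated Palette.zig (unchanged by the move): byte writes are widened to a halfword with the byte mirrored into both halves, matching the hardware's handling of 8-bit PALRAM writes. A hypothetical test that could sit in Palette.zig next to the code above; it is not part of this diff:

test "8-bit PALRAM writes land on both bytes of the aligned halfword" {
    var palram = try init(std.testing.allocator);
    defer palram.deinit();

    palram.write(u8, 0x0500_0003, 0xAB);
    try std.testing.expectEqual(@as(u16, 0xABAB), palram.read(u16, 0x0500_0002));
}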

src/core/ppu/Vram.zig (new file, +60 lines)

@@ -0,0 +1,60 @@
const std = @import("std");
const io = @import("../bus/io.zig");
const Allocator = std.mem.Allocator;
const buf_len = 0x18000;
const Self = @This();
buf: []u8,
allocator: Allocator,
pub fn read(self: *const Self, comptime T: type, address: usize) T {
const addr = Self.mirror(address);
return switch (T) {
u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
else => @compileError("VRAM: Unsupported read width"),
};
}
pub fn write(self: *Self, comptime T: type, dispcnt: io.DisplayControl, address: usize, value: T) void {
const mode: u3 = dispcnt.bg_mode.read();
const idx = Self.mirror(address);
switch (T) {
u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[idx..][0..@sizeOf(T)], value),
u8 => {
// Ignore write if it falls within the boundaries of OBJ VRAM
switch (mode) {
0, 1, 2 => if (0x0001_0000 <= idx) return,
else => if (0x0001_4000 <= idx) return,
}
const align_idx = idx & ~@as(u32, 1); // Aligned to a halfword boundary
std.mem.writeIntSliceLittle(u16, self.buf[align_idx..][0..@sizeOf(u16)], @as(u16, value) * 0x101);
},
else => @compileError("VRAM: Unsupported write width"),
}
}
pub fn init(allocator: Allocator) !Self {
const buf = try allocator.alloc(u8, buf_len);
std.mem.set(u8, buf, 0);
return Self{ .buf = buf, .allocator = allocator };
}
pub fn deinit(self: *Self) void {
self.allocator.free(self.buf);
self.* = undefined;
}
fn mirror(address: usize) usize {
// Mirrored in steps of 128K (64K + 32K + 32K) (abcc)
const addr = address & 0x1FFFF;
// If the address is within 96K we don't do anything,
// otherwise we want to mirror the last 32K (addresses between 64K and 96K)
return if (addr < buf_len) addr else 0x10000 + (addr & 0x7FFF);
}
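And the mirroring rule at the bottom of Vram.zig, worked through with concrete addresses: VRAM repeats every 128K, with the upper 32K of each mirror mapping back onto the 64K..96K region. A hypothetical in-file test (not part of this diff) that exercises the private mirror function:

test "VRAM mirrors in 128K steps, repeating the final 32K" {
    // 0x0601_C123 & 0x1FFFF = 0x1C123, past the 96K of real VRAM,
    // so it folds back to 0x10000 + (0x1C123 & 0x7FFF) = 0x14123.
    try std.testing.expectEqual(@as(usize, 0x14123), mirror(0x0601_C123));
    // Addresses within the first 96K pass through untouched.
    try std.testing.expectEqual(@as(usize, 0x04123), mirror(0x0600_4123));
}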

View File

@@ -5,6 +5,8 @@ const config = @import("config.zig");
const Log2Int = std.math.Log2Int;
const Arm7tdmi = @import("core/cpu.zig").Arm7tdmi;
+const Allocator = std.mem.Allocator;
// Sign-Extend value of type `T` to type `U`
pub fn sext(comptime T: type, comptime U: type, value: T) T {
// U must have less bits than T
@@ -165,6 +167,7 @@ pub const io = struct {
pub const Logger = struct {
const Self = @This();
+const FmtArgTuple = std.meta.Tuple(&.{ u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32 });
buf: std.io.BufferedWriter(4096 << 2, std.fs.File.Writer),
@@ -223,8 +226,6 @@
}
};
-const FmtArgTuple = std.meta.Tuple(&.{ u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32 });
pub const audio = struct {
const _io = @import("core/bus/io.zig");
@@ -275,7 +276,7 @@ pub const audio = struct {
};
/// Sets the high bits of an integer to a value
-pub inline fn setHi(comptime T: type, left: T, right: HalfInt(T)) T {
+pub inline fn setLo(comptime T: type, left: T, right: HalfInt(T)) T {
return switch (T) {
u32 => (left & 0xFFFF_0000) | right,
u16 => (left & 0xFF00) | right,
@@ -285,7 +286,7 @@ pub inline fn setHi(comptime T: type, left: T, right: HalfInt(T)) T {
}
/// sets the low bits of an integer to a value
-pub inline fn setLo(comptime T: type, left: T, right: HalfInt(T)) T {
+pub inline fn setHi(comptime T: type, left: T, right: HalfInt(T)) T {
return switch (T) {
u32 => (left & 0x0000_FFFF) | @as(u32, right) << 16,
u16 => (left & 0x00FF) | @as(u16, right) << 8,
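With the names now matching their behaviour, a quick worked example of the pair, written as a hypothetical test in util.zig and mirroring how the byte-wide WINH/WINV handlers in io.zig use them:

test "setLo and setHi patch the expected halves" {
    var win0h: u16 = 0x0000;
    win0h = setLo(u16, win0h, 0xF0); // byte write to 0x0400_0040 -> low byte (x2)
    win0h = setHi(u16, win0h, 0x3C); // byte write to 0x0400_0041 -> high byte (x1)
    try std.testing.expectEqual(@as(u16, 0x3CF0), win0h);
}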
@@ -302,3 +303,44 @@
return std.meta.Int(type_info.Int.signedness, type_info.Int.bits >> 1);
}
/// Double Buffering Implementation
pub const FrameBuffer = struct {
const Self = @This();
layers: [2][]u8,
buf: []u8,
current: u1,
allocator: Allocator,
// TODO: Rename
const Device = enum { Emulator, Renderer };
pub fn init(allocator: Allocator, comptime len: comptime_int) !Self {
const buf = try allocator.alloc(u8, len * 2);
std.mem.set(u8, buf, 0);
return .{
// Front and Back Framebuffers
.layers = [_][]u8{ buf[0..][0..len], buf[len..][0..len] },
.buf = buf,
.current = 0,
.allocator = allocator,
};
}
pub fn deinit(self: *Self) void {
self.allocator.free(self.buf);
self.* = undefined;
}
pub fn swap(self: *Self) void {
self.current = ~self.current;
}
pub fn get(self: *Self, comptime dev: Device) []u8 {
return self.layers[if (dev == .Emulator) self.current else ~self.current];
}
};
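A sketch of how the relocated double buffer is meant to be driven: the emulator writes into get(.Emulator), the frontend reads get(.Renderer), and swap() flips the two roles. The surrounding names (allocator, renderer.present) are assumptions for illustration, not APIs from this diff:

var fb = try FrameBuffer.init(allocator, 240 * 160 * @sizeOf(u32));
defer fb.deinit();

std.mem.set(u8, fb.get(.Emulator), 0); // emulator draws into the back buffer
fb.swap();                             // e.g. once per VBlank
renderer.present(fb.get(.Renderer));   // frontend sees the completed frame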