Compare commits
9 Commits
d645827a19 ... 41a174fea5
Author | SHA1 | Date |
---|---|---|
Rekai Nyangadzayi Musuka | 41a174fea5 | |
Rekai Nyangadzayi Musuka | e5d89beeb9 | |
Rekai Nyangadzayi Musuka | 35dfb3ef6a | |
Rekai Nyangadzayi Musuka | add7ea6f1d | |
Rekai Nyangadzayi Musuka | d66021dc9e | |
Rekai Nyangadzayi Musuka | 43d81ebcb1 | |
Rekai Nyangadzayi Musuka | 8ddc3e5919 | |
Rekai Nyangadzayi Musuka | 64c86949e7 | |
Rekai Nyangadzayi Musuka | 92cfc763c0 | |
@@ -12,7 +12,7 @@ const Apu = @import("apu.zig").Apu;
 const DmaTuple = @import("bus/dma.zig").DmaTuple;
 const TimerTuple = @import("bus/timer.zig").TimerTuple;
 const Scheduler = @import("scheduler.zig").Scheduler;
-const FilePaths = @import("util.zig").FilePaths;
+const FilePaths = @import("../util.zig").FilePaths;

 const io = @import("bus/io.zig");
 const Allocator = std.mem.Allocator;
@@ -20,7 +20,7 @@ const log = std.log.scoped(.Bus);

 const createDmaTuple = @import("bus/dma.zig").create;
 const createTimerTuple = @import("bus/timer.zig").create;
-const rotr = @import("util.zig").rotr;
+const rotr = @import("../util.zig").rotr;

 const timings: [2][0x10]u8 = [_][0x10]u8{
     // BIOS, Unused, EWRAM, IWRAM, I/0, PALRAM, VRAM, OAM, ROM0, ROM0, ROM1, ROM1, ROM2, ROM2, SRAM, Unused

@@ -1,7 +1,7 @@
 const std = @import("std");
 const SDL = @import("sdl2");
 const io = @import("bus/io.zig");
-const util = @import("util.zig");
+const util = @import("../util.zig");

 const Arm7tdmi = @import("cpu.zig").Arm7tdmi;
 const Scheduler = @import("scheduler.zig").Scheduler;
@@ -9,7 +9,7 @@ const Scheduler = @import("scheduler.zig").Scheduler;
 const SoundFifo = std.fifo.LinearFifo(u8, .{ .Static = 0x20 });
 const AudioDeviceId = SDL.SDL_AudioDeviceID;

-const intToBytes = @import("util.zig").intToBytes;
+const intToBytes = @import("../util.zig").intToBytes;
 const log = std.log.scoped(.APU);

 pub const host_sample_rate = 1 << 15;

@@ -2,8 +2,8 @@ const std = @import("std");
 const Allocator = std.mem.Allocator;
 const log = std.log.scoped(.Backup);

-const escape = @import("../util.zig").escape;
-const span = @import("../util.zig").span;
+const escape = @import("../../util.zig").escape;
+const span = @import("../../util.zig").span;

 const backup_kinds = [6]Needle{
     .{ .str = "EEPROM_V", .kind = .Eeprom },

@@ -1,5 +1,5 @@
 const std = @import("std");
-const util = @import("../util.zig");
+const util = @import("../../util.zig");

 const DmaControl = @import("io.zig").DmaControl;
 const Bus = @import("../Bus.zig");
@@ -227,7 +227,7 @@ fn DmaController(comptime id: u2) type {
 }
 }

-pub fn pollBlankingDma(self: *Self, comptime kind: DmaKind) void {
+pub fn pollBlanking(self: *Self, comptime kind: DmaKind) void {
     if (self.in_progress) return; // If there's an ongoing DMA Transfer, exit early

     // No ongoing DMA Transfer, We want to check if we should repeat an existing one
@@ -271,11 +271,11 @@ fn DmaController(comptime id: u2) type {
 };
 }

-pub fn pollBlankingDma(bus: *Bus, comptime kind: DmaKind) void {
-    bus.dma[0].pollBlankingDma(kind);
-    bus.dma[1].pollBlankingDma(kind);
-    bus.dma[2].pollBlankingDma(kind);
-    bus.dma[3].pollBlankingDma(kind);
+pub fn onBlanking(bus: *Bus, comptime kind: DmaKind) void {
+    bus.dma[0].pollBlanking(kind);
+    bus.dma[1].pollBlanking(kind);
+    bus.dma[2].pollBlanking(kind);
+    bus.dma[3].pollBlanking(kind);
 }

 const Adjustment = enum(u2) {

@@ -3,7 +3,7 @@ const builtin = @import("builtin");
 const timer = @import("timer.zig");
 const dma = @import("dma.zig");
 const apu = @import("../apu.zig");
-const util = @import("../util.zig");
+const util = @import("../../util.zig");

 const Bit = @import("bitfield").Bit;
 const Bitfield = @import("bitfield").Bitfield;
@@ -302,6 +302,14 @@ pub fn write(bus: *Bus, comptime T: type, address: u32, value: T) void {
 0x0400_0009 => bus.ppu.bg[0].cnt.raw = (@as(u16, value) << 8) | (bus.ppu.bg[0].cnt.raw & 0xFF),
 0x0400_000A => bus.ppu.bg[1].cnt.raw = (bus.ppu.bg[1].cnt.raw & 0xFF00) | value,
 0x0400_000B => bus.ppu.bg[1].cnt.raw = (@as(u16, value) << 8) | (bus.ppu.bg[1].cnt.raw & 0xFF),
+0x0400_0040 => bus.ppu.win.h[0].set(.Lo, value),
+0x0400_0041 => bus.ppu.win.h[0].set(.Hi, value),
+0x0400_0042 => bus.ppu.win.h[1].set(.Lo, value),
+0x0400_0043 => bus.ppu.win.h[1].set(.Hi, value),
+0x0400_0044 => bus.ppu.win.v[0].set(.Lo, value),
+0x0400_0045 => bus.ppu.win.v[0].set(.Hi, value),
+0x0400_0046 => bus.ppu.win.v[1].set(.Lo, value),
+0x0400_0047 => bus.ppu.win.v[1].set(.Hi, value),
 0x0400_0048 => bus.ppu.win.setInL(value),
 0x0400_0049 => bus.ppu.win.setInH(value),
 0x0400_004A => bus.ppu.win.setOutL(value),
@@ -459,37 +467,57 @@ pub const BldY = extern union {
 raw: u16,
 };

+const u8WriteKind = enum { Hi, Lo };
+
+/// Write-only
 pub const WinH = extern union {
+    const Self = @This();
+
     x2: Bitfield(u16, 0, 8),
     x1: Bitfield(u16, 8, 8),
     raw: u16,
+
+    pub fn set(self: *Self, comptime K: u8WriteKind, value: u8) void {
+        self.raw = switch (K) {
+            .Hi => (@as(u16, value) << 8) | self.raw & 0xFF,
+            .Lo => (self.raw & 0xFF00) | value,
+        };
+    }
 };

+/// Write-only
 pub const WinV = extern union {
+    const Self = @This();
+
     y2: Bitfield(u16, 0, 8),
     y1: Bitfield(u16, 8, 8),
     raw: u16,
+
+    pub fn set(self: *Self, comptime K: u8WriteKind, value: u8) void {
+        self.raw = switch (K) {
+            .Hi => (@as(u16, value) << 8) | self.raw & 0xFF,
+            .Lo => (self.raw & 0xFF00) | value,
+        };
+    }
 };

 pub const WinIn = extern union {
     w0_bg: Bitfield(u16, 0, 4),
     w0_obj: Bit(u16, 4),
-    w0_colour: Bit(u16, 5),
+    w0_bld: Bit(u16, 5),
     w1_bg: Bitfield(u16, 8, 4),
     w1_obj: Bit(u16, 12),
-    w1_colour: Bit(u16, 13),
+    w1_bld: Bit(u16, 13),
     raw: u16,
 };

 pub const WinOut = extern union {
     out_bg: Bitfield(u16, 0, 4),
     out_obj: Bit(u16, 4),
-    out_colour: Bit(u16, 5),
+    out_bld: Bit(u16, 5),
     obj_bg: Bitfield(u16, 8, 4),
     obj_obj: Bit(u16, 12),
-    obj_colour: Bit(u16, 13),
+    obj_bld: Bit(u16, 13),
     raw: u16,
 };

@@ -1,5 +1,5 @@
 const std = @import("std");
-const util = @import("../util.zig");
+const util = @import("../../util.zig");

 const TimerControl = @import("io.zig").TimerControl;
 const Io = @import("io.zig").Io;

@@ -1,12 +1,12 @@
 const std = @import("std");
-const util = @import("util.zig");
+const util = @import("../util.zig");

 const Bus = @import("Bus.zig");
 const Bit = @import("bitfield").Bit;
 const Bitfield = @import("bitfield").Bitfield;
 const Scheduler = @import("scheduler.zig").Scheduler;
-const FilePaths = @import("util.zig").FilePaths;
-const Logger = @import("util.zig").Logger;
+const FilePaths = @import("../util.zig").FilePaths;
+const Logger = @import("../util.zig").Logger;

 const File = std.fs.File;

@@ -4,7 +4,7 @@ const Bus = @import("../../Bus.zig");
 const Arm7tdmi = @import("../../cpu.zig").Arm7tdmi;
 const InstrFn = @import("../../cpu.zig").arm.InstrFn;

-const sext = @import("../../util.zig").sext;
+const sext = @import("../../../util.zig").sext;

 pub fn branch(comptime L: bool) InstrFn {
     return struct {

@@ -4,8 +4,8 @@ const Bus = @import("../../Bus.zig");
 const Arm7tdmi = @import("../../cpu.zig").Arm7tdmi;
 const InstrFn = @import("../../cpu.zig").arm.InstrFn;

-const sext = @import("../../util.zig").sext;
-const rotr = @import("../../util.zig").rotr;
+const sext = @import("../../../util.zig").sext;
+const rotr = @import("../../../util.zig").rotr;

 pub fn halfAndSignedDataTransfer(comptime P: bool, comptime U: bool, comptime I: bool, comptime W: bool, comptime L: bool) InstrFn {
     return struct {

@@ -7,7 +7,7 @@ const PSR = @import("../../cpu.zig").PSR;

 const log = std.log.scoped(.PsrTransfer);

-const rotr = @import("../../util.zig").rotr;
+const rotr = @import("../../../util.zig").rotr;

 pub fn psrTransfer(comptime I: bool, comptime R: bool, comptime kind: u2) InstrFn {
     return struct {

@@ -4,7 +4,7 @@ const Bus = @import("../../Bus.zig");
 const Arm7tdmi = @import("../../cpu.zig").Arm7tdmi;
 const InstrFn = @import("../../cpu.zig").arm.InstrFn;

-const rotr = @import("../../util.zig").rotr;
+const rotr = @import("../../../util.zig").rotr;

 pub fn singleDataSwap(comptime B: bool) InstrFn {
     return struct {

@@ -1,12 +1,12 @@
 const std = @import("std");
-const util = @import("../../util.zig");
+const util = @import("../../../util.zig");

 const shifter = @import("../barrel_shifter.zig");
 const Bus = @import("../../Bus.zig");
 const Arm7tdmi = @import("../../cpu.zig").Arm7tdmi;
 const InstrFn = @import("../../cpu.zig").arm.InstrFn;

-const rotr = @import("../../util.zig").rotr;
+const rotr = @import("../../../util.zig").rotr;

 pub fn singleDataTransfer(comptime I: bool, comptime P: bool, comptime U: bool, comptime B: bool, comptime W: bool, comptime L: bool) InstrFn {
     return struct {

@@ -3,7 +3,7 @@ const std = @import("std");
 const Arm7tdmi = @import("../cpu.zig").Arm7tdmi;
 const CPSR = @import("../cpu.zig").PSR;

-const rotr = @import("../util.zig").rotr;
+const rotr = @import("../../util.zig").rotr;

 pub fn execute(comptime S: bool, cpu: *Arm7tdmi, opcode: u32) u32 {
     var result: u32 = undefined;

@@ -3,7 +3,7 @@ const Arm7tdmi = @import("../../cpu.zig").Arm7tdmi;
 const InstrFn = @import("../../cpu.zig").thumb.InstrFn;

 const checkCond = @import("../../cpu.zig").checkCond;
-const sext = @import("../../util.zig").sext;
+const sext = @import("../../../util.zig").sext;

 pub fn fmt16(comptime cond: u4) InstrFn {
     return struct {

@@ -4,7 +4,8 @@ const Bus = @import("../../Bus.zig");
 const Arm7tdmi = @import("../../cpu.zig").Arm7tdmi;
 const InstrFn = @import("../../cpu.zig").thumb.InstrFn;

-const rotr = @import("../../util.zig").rotr;
+const rotr = @import("../../../util.zig").rotr;
+const sext = @import("../../../util.zig").sext;

 pub fn fmt6(comptime rd: u3) InstrFn {
     return struct {
@@ -16,8 +17,6 @@ pub fn fmt6(comptime rd: u3) InstrFn {
     }.inner;
 }

-const sext = @import("../../util.zig").sext;
-
 pub fn fmt78(comptime op: u2, comptime T: bool) InstrFn {
     return struct {
         fn inner(cpu: *Arm7tdmi, bus: *Bus, opcode: u16) void {

@@ -4,8 +4,8 @@ const SDL = @import("sdl2");
 const Bus = @import("Bus.zig");
 const Scheduler = @import("scheduler.zig").Scheduler;
 const Arm7tdmi = @import("cpu.zig").Arm7tdmi;
-const FpsTracker = @import("util.zig").FpsTracker;
-const FilePaths = @import("util.zig").FilePaths;
+const FpsTracker = @import("../util.zig").FpsTracker;
+const FilePaths = @import("../util.zig").FilePaths;

 const Timer = std.time.Timer;
 const Thread = std.Thread;

src/core/ppu.zig (408 changed lines)

@@ -1,16 +1,19 @@
 const std = @import("std");
 const io = @import("bus/io.zig");
-const Bit = @import("bitfield").Bit;
-const Bitfield = @import("bitfield").Bitfield;
+const dma = @import("bus/dma.zig");

+const Oam = @import("ppu/Oam.zig");
+const Palette = @import("ppu/Palette.zig");
+const Vram = @import("ppu/Vram.zig");
 const EventKind = @import("scheduler.zig").EventKind;
 const Scheduler = @import("scheduler.zig").Scheduler;
 const Arm7tdmi = @import("cpu.zig").Arm7tdmi;

+const Bit = @import("bitfield").Bit;
+const Bitfield = @import("bitfield").Bitfield;
+const FrameBuffer = @import("../util.zig").FrameBuffer;
+
 const Allocator = std.mem.Allocator;
-const log = std.log.scoped(.PPU);
-const pollBlankingDma = @import("bus/dma.zig").pollBlankingDma;
+const log = std.log.scoped(.Ppu);

 /// This is used to generate byuu / Talurabi's Color Correction algorithm
 const COLOUR_LUT = genColourLut();
@@ -51,14 +54,14 @@ pub const Ppu = struct {
 sched.push(.Draw, 240 * 4);

 const sprites = try allocator.create([128]?Sprite);
-sprites.* = [_]?Sprite{null} ** 128;
+std.mem.set(?Sprite, sprites, null);

 return Self{
     .vram = try Vram.init(allocator),
     .palette = try Palette.init(allocator),
     .oam = try Oam.init(allocator),
     .sched = sched,
-    .framebuf = try FrameBuffer.init(allocator),
+    .framebuf = try FrameBuffer.init(allocator, framebuf_pitch * height),
     .allocator = allocator,

     // Registers
@@ -277,16 +280,17 @@ pub const Ppu = struct {
 aff_x += self.aff_bg[n - 2].pa;
 aff_y += self.aff_bg[n - 2].pc;

-if (!shouldDrawBackground(n, self.bldcnt, &self.scanline, i)) continue;
+const x = @bitCast(u32, ix);
+const y = @bitCast(u32, iy);
+
+const win_bounds = self.windowBounds(@truncate(u9, x), @truncate(u8, y));
+if (!shouldDrawBackground(self, n, win_bounds, i)) continue;

 if (self.bg[n].cnt.display_overflow.read()) {
     ix = if (ix > px_width) @rem(ix, px_width) else if (ix < 0) px_width + @rem(ix, px_width) else ix;
     iy = if (iy > px_height) @rem(iy, px_height) else if (iy < 0) px_height + @rem(iy, px_height) else iy;
 } else if (ix > px_width or iy > px_height or ix < 0 or iy < 0) continue;

-const x = @bitCast(u32, ix);
-const y = @bitCast(u32, iy);
-
 const tile_id: u32 = self.vram.read(u8, screen_base + ((y / 8) * @bitCast(u32, tile_width) + (x / 8)));
 const row = y & 7;
 const col = x & 7;
@@ -296,7 +300,7 @@ pub const Ppu = struct {

 if (pal_id != 0) {
     const bgr555 = self.palette.read(u16, pal_id * 2);
-    copyToBackgroundBuffer(n, self.bldcnt, &self.scanline, i, bgr555);
+    self.copyToBackgroundBuffer(n, win_bounds, i, bgr555);
 }
 }

@@ -305,7 +309,7 @@ pub const Ppu = struct {
 self.aff_bg[n - 2].y_latch.? += self.aff_bg[n - 2].pd; // PD is added to BGxY
 }

-fn drawBackround(self: *Self, comptime n: u2) void {
+fn drawBackground(self: *Self, comptime n: u2) void {
     // A Tile in a charblock is a byte, while a Screen Entry is a halfword

     const char_base = 0x4000 * @as(u32, self.bg[n].cnt.char_base.read());
@@ -325,10 +329,11 @@ pub const Ppu = struct {

 var i: u32 = 0;
 while (i < width) : (i += 1) {
-    if (!shouldDrawBackground(n, self.bldcnt, &self.scanline, i)) continue;
-
     const x = hofs + i;

+    const win_bounds = self.windowBounds(@truncate(u9, x), @truncate(u8, y));
+    if (!shouldDrawBackground(self, n, win_bounds, i)) continue;
+
     // Grab the Screen Entry from VRAM
     const entry_addr = screen_base + tilemapOffset(size, x, y);
     const entry = @bitCast(ScreenEntry, self.vram.read(u16, entry_addr));
@@ -353,7 +358,7 @@ pub const Ppu = struct {

 if (pal_id != 0) {
     const bgr555 = self.palette.read(u16, pal_id * 2);
-    copyToBackgroundBuffer(n, self.bldcnt, &self.scanline, i, bgr555);
+    self.copyToBackgroundBuffer(n, win_bounds, i, bgr555);
 }
 }
 }
@@ -379,10 +384,10 @@ pub const Ppu = struct {
 var layer: usize = 0;
 while (layer < 4) : (layer += 1) {
     self.drawSprites(@truncate(u2, layer));
-    if (layer == self.bg[0].cnt.priority.read() and bg_enable & 1 == 1) self.drawBackround(0);
-    if (layer == self.bg[1].cnt.priority.read() and bg_enable >> 1 & 1 == 1) self.drawBackround(1);
-    if (layer == self.bg[2].cnt.priority.read() and bg_enable >> 2 & 1 == 1) self.drawBackround(2);
-    if (layer == self.bg[3].cnt.priority.read() and bg_enable >> 3 & 1 == 1) self.drawBackround(3);
+    if (layer == self.bg[0].cnt.priority.read() and bg_enable & 1 == 1) self.drawBackground(0);
+    if (layer == self.bg[1].cnt.priority.read() and bg_enable >> 1 & 1 == 1) self.drawBackground(1);
+    if (layer == self.bg[2].cnt.priority.read() and bg_enable >> 2 & 1 == 1) self.drawBackground(2);
+    if (layer == self.bg[3].cnt.priority.read() and bg_enable >> 3 & 1 == 1) self.drawBackground(3);
 }

 // Copy Drawn Scanline to Frame Buffer
@@ -407,8 +412,8 @@ pub const Ppu = struct {
 var layer: usize = 0;
 while (layer < 4) : (layer += 1) {
     self.drawSprites(@truncate(u2, layer));
-    if (layer == self.bg[0].cnt.priority.read() and bg_enable & 1 == 1) self.drawBackround(0);
-    if (layer == self.bg[1].cnt.priority.read() and bg_enable >> 1 & 1 == 1) self.drawBackround(1);
+    if (layer == self.bg[0].cnt.priority.read() and bg_enable & 1 == 1) self.drawBackground(0);
+    if (layer == self.bg[1].cnt.priority.read() and bg_enable >> 1 & 1 == 1) self.drawBackground(1);
     if (layer == self.bg[2].cnt.priority.read() and bg_enable >> 2 & 1 == 1) self.drawAffineBackground(2);
 }

@@ -486,7 +491,7 @@ pub const Ppu = struct {
 while (i < width) : (i += 1) {
     // If we're outside of the bounds of mode 5, draw the background colour
     const bgr555 =
-        if (scanline < m5_height and i < m5_width) self.vram.read(u16, vram_base + i * @sizeOf(u16)) else self.palette.getBackdrop();
+        if (scanline < m5_height and i < m5_width) self.vram.read(u16, vram_base + i * @sizeOf(u16)) else self.palette.backdrop();

     std.mem.writeIntNative(u32, self.framebuf.get(.Emulator)[fb_base + i * @sizeOf(u32) ..][0..@sizeOf(u32)], COLOUR_LUT[bgr555 & 0x7FFF]);
 }
@@ -530,7 +535,94 @@ pub const Ppu = struct {
 }

 if (maybe_top) |top| return top;
-return self.palette.getBackdrop();
+return self.palette.backdrop();
 }

+fn copyToBackgroundBuffer(self: *Self, comptime n: u2, bounds: ?WindowBounds, i: usize, bgr555: u16) void {
+    if (self.bldcnt.mode.read() != 0b00) {
+        // Standard Alpha Blending
+        const a_layers = self.bldcnt.layer_a.read();
+        const is_blend_enabled = (a_layers >> n) & 1 == 1;
+
+        // If Alpha Blending is enabled and we've found an eligible layer for
+        // Pixel A, store the pixel in the bottom pixel buffer
+
+        const win_part = if (bounds) |win| blk: {
+            // Window Enabled
+            break :blk switch (win) {
+                .win0 => self.win.in.w0_bld.read(),
+                .win1 => self.win.in.w1_bld.read(),
+                .out => self.win.out.out_bld.read(),
+            };
+        } else true;
+
+        if (win_part and is_blend_enabled) {
+            self.scanline.btm()[i] = bgr555;
+            return;
+        }
+    }
+
+    self.scanline.top()[i] = bgr555;
+}
+
+const WindowBounds = enum { win0, win1, out };
+
+fn windowBounds(self: *Self, x: u9, y: u8) ?WindowBounds {
+    const win0 = self.dispcnt.win_enable.read() & 1 == 1;
+    const win1 = (self.dispcnt.win_enable.read() >> 1) & 1 == 1;
+    const winObj = self.dispcnt.obj_win_enable.read();
+
+    if (!(win0 or win1 or winObj)) return null;
+
+    if (win0 and self.win.inRange(0, x, y)) return .win0;
+    if (win1 and self.win.inRange(1, x, y)) return .win1;
+
+    return .out;
+}
+
+fn shouldDrawBackground(self: *Self, comptime n: u2, bounds: ?WindowBounds, i: usize) bool {
+    // If a pixel has been drawn on the top layer, it's because:
+    // 1. The pixel is to be blended with a pixel on the bottom layer
+    // 2. The pixel is not to be blended at all
+    // Also, if we find a pixel on the top layer we don't need to bother with this I think?
+    if (self.scanline.top()[i] != null) return false;
+
+    if (bounds) |win| {
+        switch (win) {
+            .win0 => if ((self.win.in.w0_bg.read() >> n) & 1 == 0) return false,
+            .win1 => if ((self.win.in.w1_bg.read() >> n) & 1 == 0) return false,
+            .out => if ((self.win.out.out_bg.read() >> n) & 1 == 0) return false,
+        }
+    }
+
+    if (self.scanline.btm()[i] != null) {
+        // The pixel found in the bottom layer is:
+        // 1. From a higher priority background
+        // 2. From a background that is marked for blending (Pixel A)
+
+        // If Alpha Blending isn't enabled, then we've already found a higher prio
+        // pixel, we can return early
+        if (self.bldcnt.mode.read() != 0b01) return false;
+
+        const b_layers = self.bldcnt.layer_b.read();
+
+        const win_part = if (bounds) |win| blk: {
+            // Window Enabled
+            break :blk switch (win) {
+                .win0 => self.win.in.w0_bld.read(),
+                .win1 => self.win.in.w1_bld.read(),
+                .out => self.win.out.out_bld.read(),
+            };
+        } else true;
+
+        // If the Background is not marked for blending, we've already found
+        // a higher priority pixel, move on.
+
+        const is_blend_enabled = win_part and ((b_layers >> n) & 1 == 1);
+        if (!is_blend_enabled) return false;
+    }
+
+    return true;
+}
+
 // TODO: Comment this + get a better understanding
@@ -572,7 +664,7 @@ pub const Ppu = struct {
 // See if HBlank DMA is present and not enabled

 if (!self.dispstat.vblank.read())
-    pollBlankingDma(cpu.bus, .HBlank);
+    dma.onBlanking(cpu.bus, .HBlank);

 self.dispstat.hblank.set();
 self.sched.push(.HBlank, 68 * 4 -| late);
@@ -614,7 +706,7 @@ pub const Ppu = struct {
 self.aff_bg[1].latchRefPoints();

 // See if Vblank DMA is present and not enabled
-pollBlankingDma(cpu.bus, .VBlank);
+dma.onBlanking(cpu.bus, .VBlank);
 }

 if (scanline == 227) self.dispstat.vblank.unset();
@@ -623,158 +715,6 @@ pub const Ppu = struct {
 }
 };

-const Palette = struct {
-    const palram_size = 0x400;
-    const Self = @This();
-
-    buf: []u8,
-    allocator: Allocator,
-
-    fn init(allocator: Allocator) !Self {
-        const buf = try allocator.alloc(u8, palram_size);
-        std.mem.set(u8, buf, 0);
-
-        return Self{
-            .buf = buf,
-            .allocator = allocator,
-        };
-    }
-
-    fn deinit(self: *Self) void {
-        self.allocator.free(self.buf);
-        self.* = undefined;
-    }
-
-    pub fn read(self: *const Self, comptime T: type, address: usize) T {
-        const addr = address & 0x3FF;
-
-        return switch (T) {
-            u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
-            else => @compileError("PALRAM: Unsupported read width"),
-        };
-    }
-
-    pub fn write(self: *Self, comptime T: type, address: usize, value: T) void {
-        const addr = address & 0x3FF;
-
-        switch (T) {
-            u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)], value),
-            u8 => {
-                const align_addr = addr & ~@as(u32, 1); // Aligned to Halfword boundary
-                std.mem.writeIntSliceLittle(u16, self.buf[align_addr..][0..@sizeOf(u16)], @as(u16, value) * 0x101);
-            },
-            else => @compileError("PALRAM: Unsupported write width"),
-        }
-    }
-
-    fn getBackdrop(self: *const Self) u16 {
-        return self.read(u16, 0);
-    }
-};
-
-const Vram = struct {
-    const vram_size = 0x18000;
-    const Self = @This();
-
-    buf: []u8,
-    allocator: Allocator,
-
-    fn init(allocator: Allocator) !Self {
-        const buf = try allocator.alloc(u8, vram_size);
-        std.mem.set(u8, buf, 0);
-
-        return Self{
-            .buf = buf,
-            .allocator = allocator,
-        };
-    }
-
-    fn deinit(self: *Self) void {
-        self.allocator.free(self.buf);
-        self.* = undefined;
-    }
-
-    pub fn read(self: *const Self, comptime T: type, address: usize) T {
-        const addr = Self.mirror(address);
-
-        return switch (T) {
-            u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
-            else => @compileError("VRAM: Unsupported read width"),
-        };
-    }
-
-    pub fn write(self: *Self, comptime T: type, dispcnt: io.DisplayControl, address: usize, value: T) void {
-        const mode: u3 = dispcnt.bg_mode.read();
-        const idx = Self.mirror(address);
-
-        switch (T) {
-            u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[idx..][0..@sizeOf(T)], value),
-            u8 => {
-                // Ignore write if it falls within the boundaries of OBJ VRAM
-                switch (mode) {
-                    0, 1, 2 => if (0x0001_0000 <= idx) return,
-                    else => if (0x0001_4000 <= idx) return,
-                }
-
-                const align_idx = idx & ~@as(u32, 1); // Aligned to a halfword boundary
-                std.mem.writeIntSliceLittle(u16, self.buf[align_idx..][0..@sizeOf(u16)], @as(u16, value) * 0x101);
-            },
-            else => @compileError("VRAM: Unsupported write width"),
-        }
-    }
-
-    fn mirror(address: usize) usize {
-        // Mirrored in steps of 128K (64K + 32K + 32K) (abcc)
-        const addr = address & 0x1FFFF;
-
-        // If the address is within 96K we don't do anything,
-        // otherwise we want to mirror the last 32K (addresses between 64K and 96K)
-        return if (addr < vram_size) addr else 0x10000 + (addr & 0x7FFF);
-    }
-};
-
-const Oam = struct {
-    const oam_size = 0x400;
-    const Self = @This();
-
-    buf: []u8,
-    allocator: Allocator,
-
-    fn init(allocator: Allocator) !Self {
-        const buf = try allocator.alloc(u8, oam_size);
-        std.mem.set(u8, buf, 0);
-
-        return Self{
-            .buf = buf,
-            .allocator = allocator,
-        };
-    }
-
-    fn deinit(self: *Self) void {
-        self.allocator.free(self.buf);
-        self.* = undefined;
-    }
-
-    pub fn read(self: *const Self, comptime T: type, address: usize) T {
-        const addr = address & 0x3FF;
-
-        return switch (T) {
-            u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
-            else => @compileError("OAM: Unsupported read width"),
-        };
-    }
-
-    pub fn write(self: *Self, comptime T: type, address: usize, value: T) void {
-        const addr = address & 0x3FF;
-
-        switch (T) {
-            u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)], value),
-            u8 => return, // 8-bit writes are explicitly ignored
-            else => @compileError("OAM: Unsupported write width"),
-        }
-    }
-};
-
 const Window = struct {
     const Self = @This();

@@ -794,6 +734,25 @@ const Window = struct {
     };
 }

+fn inRange(self: *const Self, comptime id: u1, x: u9, y: u8) bool {
+    const h = self.h[id];
+    const v = self.v[id];
+
+    const y1 = v.y1.read();
+    const y2 = if (y1 > v.y2.read()) 160 else std.math.min(160, v.y2.read());
+
+    if (y1 <= y and y < y2) {
+        // Within Y bounds
+        const x1 = h.x1.read();
+        const x2 = if (x1 > h.x2.read()) 240 else std.math.min(240, h.x2.read());
+
+        // Within X Bounds
+        return x1 <= x and x < x2;
+    }
+
+    return false;
+}
+
 pub fn setH(self: *Self, value: u32) void {
     self.h[0].raw = @truncate(u16, value);
     self.h[1].raw = @truncate(u16, value >> 16);
@@ -1135,37 +1094,6 @@ fn alphaBlend(top: u16, btm: u16, bldalpha: io.BldAlpha) u16 {
     return (bld_b << 10) | (bld_g << 5) | bld_r;
 }

-fn shouldDrawBackground(comptime n: u2, bldcnt: io.BldCnt, scanline: *Scanline, i: usize) bool {
-    // If a pixel has been drawn on the top layer, it's because
-    // Either the pixel is to be blended with a pixel on the bottom layer
-    // or the pixel is not to be blended at all
-    // Consequentially, if we find a pixel on the top layer, there's no need
-    // to render anything I think?
-    if (scanline.top()[i] != null) return false;
-
-    if (scanline.btm()[i] != null) {
-        // The Pixel found in the Bottom layer is
-        // 1. From a higher priority
-        // 2. From a Backround that is marked for Blending (Pixel A)
-        //
-        // We now have to confirm whether this current Background can be used
-        // as Pixel B or not.
-
-        // If Alpha Blending isn't enabled, we've aready found a higher
-        // priority pixel to render. Move on
-        if (bldcnt.mode.read() != 0b01) return false;
-
-        const b_layers = bldcnt.layer_b.read();
-        const is_blend_enabled = (b_layers >> n) & 1 == 1;
-
-        // If the Background is not marked for blending, we've already found
-        // a higher priority pixel, move on.
-        if (!is_blend_enabled) return false;
-    }
-
-    return true;
-}
-
 fn shouldDrawSprite(bldcnt: io.BldCnt, scanline: *Scanline, x: u9) bool {
     if (scanline.top()[x] != null) return false;

@@ -1180,23 +1108,6 @@ fn shouldDrawSprite(bldcnt: io.BldCnt, scanline: *Scanline, x: u9) bool {
     return true;
 }

-fn copyToBackgroundBuffer(comptime n: u2, bldcnt: io.BldCnt, scanline: *Scanline, i: usize, bgr555: u16) void {
-    if (bldcnt.mode.read() != 0b00) {
-        // Standard Alpha Blending
-        const a_layers = bldcnt.layer_a.read();
-        const is_blend_enabled = (a_layers >> n) & 1 == 1;
-
-        // If Alpha Blending is enabled and we've found an eligible layer for
-        // Pixel A, store the pixel in the bottom pixel buffer
-        if (is_blend_enabled) {
-            scanline.btm()[i] = bgr555;
-            return;
-        }
-    }
-
-    scanline.top()[i] = bgr555;
-}
-
 fn copyToSpriteBuffer(bldcnt: io.BldCnt, scanline: *Scanline, x: u9, bgr555: u16) void {
     if (bldcnt.mode.read() != 0b00) {
         // Alpha Blending
@@ -1249,48 +1160,3 @@ const Scanline = struct {
         return self.layers[1];
     }
 };
-
-// Double Buffering Implementation
-const FrameBuffer = struct {
-    const Self = @This();
-
-    layers: [2][]u8,
-    buf: []u8,
-    current: u1,
-
-    allocator: Allocator,
-
-    // TODO: Rename
-    const Device = enum {
-        Emulator,
-        Renderer,
-    };
-
-    pub fn init(allocator: Allocator) !Self {
-        const framebuf_len = framebuf_pitch * height;
-        const buf = try allocator.alloc(u8, framebuf_len * 2);
-        std.mem.set(u8, buf, 0);
-
-        return .{
-            // Front and Back Framebuffers
-            .layers = [_][]u8{ buf[0..][0..framebuf_len], buf[framebuf_len..][0..framebuf_len] },
-            .buf = buf,
-            .current = 0,
-
-            .allocator = allocator,
-        };
-    }
-
-    fn deinit(self: *Self) void {
-        self.allocator.free(self.buf);
-        self.* = undefined;
-    }
-
-    pub fn swap(self: *Self) void {
-        self.current = ~self.current;
-    }
-
-    pub fn get(self: *Self, comptime dev: Device) []u8 {
-        return self.layers[if (dev == .Emulator) self.current else ~self.current];
-    }
-};

@@ -0,0 +1,40 @@
+const std = @import("std");
+
+const Allocator = std.mem.Allocator;
+
+const buf_len = 0x400;
+const Self = @This();
+
+buf: []u8,
+allocator: Allocator,
+
+pub fn read(self: *const Self, comptime T: type, address: usize) T {
+    const addr = address & 0x3FF;
+
+    return switch (T) {
+        u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
+        else => @compileError("OAM: Unsupported read width"),
+    };
+}
+
+pub fn write(self: *Self, comptime T: type, address: usize, value: T) void {
+    const addr = address & 0x3FF;
+
+    switch (T) {
+        u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)], value),
+        u8 => return, // 8-bit writes are explicitly ignored
+        else => @compileError("OAM: Unsupported write width"),
+    }
+}
+
+pub fn init(allocator: Allocator) !Self {
+    const buf = try allocator.alloc(u8, buf_len);
+    std.mem.set(u8, buf, 0);
+
+    return Self{ .buf = buf, .allocator = allocator };
+}
+
+pub fn deinit(self: *Self) void {
+    self.allocator.free(self.buf);
+    self.* = undefined;
+}

@@ -0,0 +1,47 @@
+const std = @import("std");
+
+const Allocator = std.mem.Allocator;
+
+const buf_len = 0x400;
+const Self = @This();
+
+buf: []u8,
+allocator: Allocator,
+
+pub fn read(self: *const Self, comptime T: type, address: usize) T {
+    const addr = address & 0x3FF;
+
+    return switch (T) {
+        u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
+        else => @compileError("PALRAM: Unsupported read width"),
+    };
+}
+
+pub fn write(self: *Self, comptime T: type, address: usize, value: T) void {
+    const addr = address & 0x3FF;
+
+    switch (T) {
+        u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)], value),
+        u8 => {
+            const align_addr = addr & ~@as(u32, 1); // Aligned to Halfword boundary
+            std.mem.writeIntSliceLittle(u16, self.buf[align_addr..][0..@sizeOf(u16)], @as(u16, value) * 0x101);
+        },
+        else => @compileError("PALRAM: Unsupported write width"),
+    }
+}
+
+pub fn init(allocator: Allocator) !Self {
+    const buf = try allocator.alloc(u8, buf_len);
+    std.mem.set(u8, buf, 0);
+
+    return Self{ .buf = buf, .allocator = allocator };
+}
+
+pub fn deinit(self: *Self) void {
+    self.allocator.free(self.buf);
+    self.* = undefined;
+}
+
+pub fn backdrop(self: *const Self) u16 {
+    return self.read(u16, 0);
+}

@@ -0,0 +1,60 @@
+const std = @import("std");
+const io = @import("../bus/io.zig");
+
+const Allocator = std.mem.Allocator;
+
+const buf_len = 0x18000;
+const Self = @This();
+
+buf: []u8,
+allocator: Allocator,
+
+pub fn read(self: *const Self, comptime T: type, address: usize) T {
+    const addr = Self.mirror(address);
+
+    return switch (T) {
+        u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
+        else => @compileError("VRAM: Unsupported read width"),
+    };
+}
+
+pub fn write(self: *Self, comptime T: type, dispcnt: io.DisplayControl, address: usize, value: T) void {
+    const mode: u3 = dispcnt.bg_mode.read();
+    const idx = Self.mirror(address);
+
+    switch (T) {
+        u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[idx..][0..@sizeOf(T)], value),
+        u8 => {
+            // Ignore write if it falls within the boundaries of OBJ VRAM
+            switch (mode) {
+                0, 1, 2 => if (0x0001_0000 <= idx) return,
+                else => if (0x0001_4000 <= idx) return,
+            }
+
+            const align_idx = idx & ~@as(u32, 1); // Aligned to a halfword boundary
+            std.mem.writeIntSliceLittle(u16, self.buf[align_idx..][0..@sizeOf(u16)], @as(u16, value) * 0x101);
+        },
+        else => @compileError("VRAM: Unsupported write width"),
+    }
+}
+
+pub fn init(allocator: Allocator) !Self {
+    const buf = try allocator.alloc(u8, buf_len);
+    std.mem.set(u8, buf, 0);
+
+    return Self{ .buf = buf, .allocator = allocator };
+}
+
+pub fn deinit(self: *Self) void {
+    self.allocator.free(self.buf);
+    self.* = undefined;
+}
+
+fn mirror(address: usize) usize {
+    // Mirrored in steps of 128K (64K + 32K + 32K) (abcc)
+    const addr = address & 0x1FFFF;
+
+    // If the address is within 96K we don't do anything,
+    // otherwise we want to mirror the last 32K (addresses between 64K and 96K)
+    return if (addr < buf_len) addr else 0x10000 + (addr & 0x7FFF);
+}

@@ -8,7 +8,7 @@ const Gui = @import("platform.zig").Gui;
 const Bus = @import("core/Bus.zig");
 const Arm7tdmi = @import("core/cpu.zig").Arm7tdmi;
 const Scheduler = @import("core/scheduler.zig").Scheduler;
-const FilePaths = @import("core/util.zig").FilePaths;
+const FilePaths = @import("util.zig").FilePaths;

 const Allocator = std.mem.Allocator;
 const log = std.log.scoped(.Cli);

@@ -5,9 +5,9 @@ const emu = @import("core/emu.zig");
 const Apu = @import("core/apu.zig").Apu;
 const Arm7tdmi = @import("core/cpu.zig").Arm7tdmi;
 const Scheduler = @import("core/scheduler.zig").Scheduler;
-const FpsTracker = @import("core/util.zig").FpsTracker;
+const FpsTracker = @import("util.zig").FpsTracker;

-const span = @import("core/util.zig").span;
+const span = @import("util.zig").span;

 const pitch = @import("core/ppu.zig").framebuf_pitch;
 const scale = @import("core/emu.zig").win_scale;

@@ -1,9 +1,11 @@
 const std = @import("std");
 const builtin = @import("builtin");
 const Log2Int = std.math.Log2Int;
-const Arm7tdmi = @import("cpu.zig").Arm7tdmi;
+const Arm7tdmi = @import("core/cpu.zig").Arm7tdmi;

-const allow_unhandled_io = @import("emu.zig").allow_unhandled_io;
+const Allocator = std.mem.Allocator;
+
+const allow_unhandled_io = @import("core/emu.zig").allow_unhandled_io;

 // Sign-Extend value of type `T` to type `U`
 pub fn sext(comptime T: type, comptime U: type, value: T) T {
@@ -172,6 +174,7 @@ pub fn writeUndefined(log: anytype, comptime format: []const u8, args: anytype)

 pub const Logger = struct {
     const Self = @This();
+    const FmtArgTuple = std.meta.Tuple(&.{ u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32 });

     buf: std.io.BufferedWriter(4096 << 2, std.fs.File.Writer),

@@ -229,4 +232,43 @@ pub const Logger = struct {
 }
 };

-const FmtArgTuple = std.meta.Tuple(&.{ u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32 });
+// Double Buffering Implementation
+pub const FrameBuffer = struct {
+    const Self = @This();
+
+    layers: [2][]u8,
+    buf: []u8,
+    current: u1,
+
+    allocator: Allocator,
+
+    // TODO: Rename
+    const Device = enum { Emulator, Renderer };
+
+    pub fn init(allocator: Allocator, comptime len: comptime_int) !Self {
+        const buf = try allocator.alloc(u8, len * 2);
+        std.mem.set(u8, buf, 0);
+
+        return .{
+            // Front and Back Framebuffers
+            .layers = [_][]u8{ buf[0..][0..len], buf[len..][0..len] },
+            .buf = buf,
+            .current = 0,
+
+            .allocator = allocator,
+        };
+    }
+
+    pub fn deinit(self: *Self) void {
+        self.allocator.free(self.buf);
+        self.* = undefined;
+    }
+
+    pub fn swap(self: *Self) void {
+        self.current = ~self.current;
+    }
+
+    pub fn get(self: *Self, comptime dev: Device) []u8 {
+        return self.layers[if (dev == .Emulator) self.current else ~self.current];
+    }
+};