Compare commits

10 Commits: a17d7e6d41 ... 102eb0c1e6

| SHA1 |
|---|
| 102eb0c1e6 |
| dda533f557 |
| 29e9d3c288 |
| 06ee31980e |
| 4a6428eee2 |
| c9559fcb6d |
| a8c1fecf8b |
| 16e1ede50f |
| 4e8691ff58 |
| 445b53a609 |
@@ -107,7 +107,7 @@ pub fn deinit(self: *Self) void {
}

fn fillReadTable(bus: *Self, table: *[table_len]?*const anyopaque) void {
    const vramMirror = @import("ppu.zig").Vram.mirror;
    const vramMirror = @import("ppu/Vram.zig").mirror;

    for (table) |*ptr, i| {
        const addr = page_size * i;
@@ -134,7 +134,7 @@ fn fillReadTable(bus: *Self, table: *[table_len]?*const anyopaque) void {

fn fillWriteTable(comptime T: type, bus: *Self, table: *[table_len]?*const anyopaque) void {
    comptime std.debug.assert(T == u32 or T == u16 or T == u8);
    const vramMirror = @import("ppu.zig").Vram.mirror;
    const vramMirror = @import("ppu/Vram.zig").mirror;

    for (table) |*ptr, i| {
        const addr = page_size * i;

@@ -333,7 +333,7 @@ fn DmaController(comptime id: u2) type {
    };
}

pub fn pollDmaOnBlank(bus: *Bus, comptime kind: DmaKind) void {
pub fn onBlanking(bus: *Bus, comptime kind: DmaKind) void {
    comptime var i: usize = 0;
    inline while (i < 4) : (i += 1) {
        bus.dma[i].poll(kind);

@@ -424,6 +424,8 @@ pub const BldY = extern union {
    raw: u16,
};

const u8WriteKind = enum { Hi, Lo };

/// Write-only
pub const WinH = extern union {
    x2: Bitfield(u16, 0, 8),
@@ -433,6 +435,8 @@ pub const WinH = extern union {

/// Write-only
pub const WinV = extern union {
    const Self = @This();

    y2: Bitfield(u16, 0, 8),
    y1: Bitfield(u16, 8, 8),
    raw: u16,
@@ -441,20 +445,20 @@ pub const WinV = extern union {
pub const WinIn = extern union {
    w0_bg: Bitfield(u16, 0, 4),
    w0_obj: Bit(u16, 4),
    w0_colour: Bit(u16, 5),
    w0_bld: Bit(u16, 5),
    w1_bg: Bitfield(u16, 8, 4),
    w1_obj: Bit(u16, 12),
    w1_colour: Bit(u16, 13),
    w1_bld: Bit(u16, 13),
    raw: u16,
};

pub const WinOut = extern union {
    out_bg: Bitfield(u16, 0, 4),
    out_obj: Bit(u16, 4),
    out_colour: Bit(u16, 5),
    out_bld: Bit(u16, 5),
    obj_bg: Bitfield(u16, 8, 4),
    obj_obj: Bit(u16, 12),
    obj_colour: Bit(u16, 13),
    obj_bld: Bit(u16, 13),
    raw: u16,
};
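Note: the hunks above are largely mechanical renames that anticipate the PPU split later in this diff: `pollDmaOnBlank` becomes `onBlanking`, the window `*_colour` bits become `*_bld`, and the VRAM mirror helper is imported from its new standalone file. A minimal sketch of why the import shortens (Vram.zig as a file-level struct is an assumption borne out by the new files below):

```zig
// Before the split, Vram was a struct nested inside ppu.zig:
// const vramMirror = @import("ppu.zig").Vram.mirror;
//
// After the split, ppu/Vram.zig *is* the struct, so its declarations
// hang directly off the import:
const vramMirror = @import("ppu/Vram.zig").mirror;
```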
							
								
								
									
src/core/ppu.zig (476 changed lines)

@@ -2,11 +2,16 @@ const std = @import("std");
const io = @import("bus/io.zig");
const util = @import("../util.zig");

const Scheduler = @import("scheduler.zig").Scheduler;
const Arm7tdmi = @import("cpu.zig").Arm7tdmi;

const Bit = @import("bitfield").Bit;
const Bitfield = @import("bitfield").Bitfield;
const dma = @import("bus/dma.zig");

const Oam = @import("ppu/Oam.zig");
const Palette = @import("ppu/Palette.zig");
const Vram = @import("ppu/Vram.zig");
const Scheduler = @import("scheduler.zig").Scheduler;
const Arm7tdmi = @import("cpu.zig").Arm7tdmi;
const FrameBuffer = @import("../util.zig").FrameBuffer;

const Allocator = std.mem.Allocator;
const log = std.log.scoped(.PPU);
@@ -14,7 +19,6 @@ const log = std.log.scoped(.PPU);
const getHalf = util.getHalf;
const setHalf = util.setHalf;
const setQuart = util.setQuart;
const pollDmaOnBlank = @import("bus/dma.zig").pollDmaOnBlank;

pub const width = 240;
pub const height = 160;
@@ -259,18 +263,17 @@ pub const Ppu = struct {
    scanline: Scanline,

    pub fn init(allocator: Allocator, sched: *Scheduler) !Self {
        // Queue first Hblank
        sched.push(.Draw, 240 * 4);
        sched.push(.Draw, 240 * 4); // Add first PPU Event to Scheduler

        const sprites = try allocator.create([128]?Sprite);
        sprites.* = [_]?Sprite{null} ** 128;
        std.mem.set(?Sprite, sprites, null);

        return Self{
            .vram = try Vram.init(allocator),
            .palette = try Palette.init(allocator),
            .oam = try Oam.init(allocator),
            .sched = sched,
            .framebuf = try FrameBuffer.init(allocator),
            .framebuf = try FrameBuffer.init(allocator, framebuf_pitch * height),
            .allocator = allocator,

            // Registers
@@ -320,20 +323,16 @@ pub const Ppu = struct {
            // Only consider enabled Sprites
            if (attr0.is_affine.read() or !attr0.disabled.read()) {
                const attr1 = @bitCast(Attr1, self.oam.read(u16, i + 2));
                const sprite_height = spriteDimensions(attr0.shape.read(), attr1.size.read())[1];

                // When fetching sprites we only care about ones that could be rendered
                // on this scanline
                const iy = @bitCast(i8, y);

                const start = attr0.y.read();
                const istart = @bitCast(i8, start);

                const end = start +% spriteDimensions(attr0.shape.read(), attr1.size.read())[1];
                const iend = @bitCast(i8, end);
                var y_pos: i32 = attr0.y.read();
                if (y_pos >= 160) y_pos -= 256; // fleroviux's solution to negative positions

                // Sprites are expected to be able to wraparound, we perform the same check
                // for unsigned and signed values so that we handle all valid sprite positions
                if ((start <= y and y < end) or (istart <= iy and iy < iend)) {
                if (y_pos <= y and y < (y_pos + sprite_height)) {
                    for (self.scanline_sprites) |*maybe_sprite| {
                        if (maybe_sprite.* == null) {
                            maybe_sprite.* = Sprite.init(attr0, attr1, @bitCast(Attr2, self.oam.read(u16, i + 4)));
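The rewritten check above replaces the paired unsigned/signed range tests with one signed comparison after wrapping large Y coordinates (the approach credited to fleroviux). A standalone sketch of that check with hypothetical values:

```zig
const std = @import("std");

/// True if a sprite with OAM Y coordinate `start` and pixel height `height`
/// covers scanline `y`. Y values >= 160 wrap around to negative positions.
fn coversScanline(start: u8, height: u8, y: u8) bool {
    var y_pos: i32 = start;
    if (y_pos >= 160) y_pos -= 256;

    return y_pos <= y and y < (y_pos + height);
}

test "sprite wraparound" {
    // A 16px-tall sprite at Y = 250 wraps to Y = -6, covering scanlines 0 through 9.
    try std.testing.expect(coversScanline(250, 16, 9));
    try std.testing.expect(!coversScanline(250, 16, 10));
}
```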
@@ -360,8 +359,6 @@ pub const Ppu = struct {
    }

    fn drawAffineSprite(self: *Self, sprite: AffineSprite) void {
        const iy = @bitCast(i8, self.vcount.scanline.read());

        const is_8bpp = sprite.is8bpp();
        const tile_id: u32 = sprite.tileId();
        const obj_mapping = self.dispcnt.obj_mapping.read();
@@ -370,25 +367,22 @@ pub const Ppu = struct {

        const char_base = 0x4000 * 4;

        const y = self.vcount.scanline.read();

        var i: u9 = 0;
        while (i < sprite.width) : (i += 1) {
            const x = (sprite.x() +% i) % width;
            const ix = @bitCast(i9, x);

            if (!shouldDrawSprite(self.bld.cnt, &self.scanline, x)) continue;

            const sprite_start = sprite.x();
            const isprite_start = @bitCast(i9, sprite_start);
            const sprite_end = sprite_start +% sprite.width;
            const isprite_end = @bitCast(i9, sprite_end);
            var x_pos: i32 = sprite.x();
            if (x_pos >= 240) x_pos -= 512;

            const condition = (sprite_start <= x and x < sprite_end) or (isprite_start <= ix and ix < isprite_end);
            if (!condition) continue;
            if (!(x_pos <= x and x < (x_pos + sprite.width))) continue;

            // Sprite is within bounds and therefore should be rendered
            // std.math.absInt is branchless
            const tile_x = @bitCast(u9, std.math.absInt(ix - @bitCast(i9, sprite.x())) catch unreachable);
            const tile_y = @bitCast(u8, std.math.absInt(iy -% @bitCast(i8, sprite.y())) catch unreachable);
            const tile_x = @bitCast(u32, @as(i32, std.math.absInt(@as(i32, x) - x_pos) catch unreachable));
            const tile_y = @bitCast(u32, @as(i32, std.math.absInt(@bitCast(i8, y) -% @bitCast(i8, sprite.y())) catch unreachable));

            const row = @truncate(u3, tile_y);
            const col = @truncate(u3, tile_x);
@@ -410,8 +404,6 @@ pub const Ppu = struct {
    }

    fn drawSprite(self: *Self, sprite: Sprite) void {
        const iy = @bitCast(i8, self.vcount.scanline.read());

        const is_8bpp = sprite.is8bpp();
        const tile_id: u32 = sprite.tileId();
        const obj_mapping = self.dispcnt.obj_mapping.read();
@@ -420,31 +412,27 @@ pub const Ppu = struct {

        const char_base = 0x4000 * 4;

        const y = self.vcount.scanline.read();

        var i: u9 = 0;
        while (i < sprite.width) : (i += 1) {
            const x = (sprite.x() +% i) % width;
            const ix = @bitCast(i9, x);

            if (!shouldDrawSprite(self.bld.cnt, &self.scanline, x)) continue;

            const sprite_start = sprite.x();
            const isprite_start = @bitCast(i9, sprite_start);
            const sprite_end = sprite_start +% sprite.width;
            const isprite_end = @bitCast(i9, sprite_end);
            var x_pos: i32 = sprite.x();
            if (x_pos >= 240) x_pos -= 512;

            const condition = (sprite_start <= x and x < sprite_end) or (isprite_start <= ix and ix < isprite_end);
            if (!condition) continue;
            if (!(x_pos <= x and x < (x_pos + sprite.width))) continue;

            // Sprite is within bounds and therefore should be rendered
            // std.math.absInt is branchless
            const x_diff = @bitCast(u9, std.math.absInt(ix - @bitCast(i9, sprite.x())) catch unreachable);
            const y_diff = @bitCast(u8, std.math.absInt(iy -% @bitCast(i8, sprite.y())) catch unreachable);
            const x_diff: i32 = std.math.absInt(@as(i32, x) - x_pos) catch unreachable;
            const y_diff: i32 = std.math.absInt(@bitCast(i8, y) -% @bitCast(i8, sprite.y())) catch unreachable;

            // Note that we flip the tile_pos not the (tile_pos % 8) like we do for
            // Background Tiles. By doing this we mirror the entire sprite instead of
            // just a specific tile (see how sprite.width and sprite.height are involved)
            const tile_y = y_diff ^ if (sprite.vFlip()) (sprite.height - 1) else 0;
            const tile_x = x_diff ^ if (sprite.hFlip()) (sprite.width - 1) else 0;
            const tile_x = @intCast(u9, x_diff) ^ if (sprite.hFlip()) (sprite.width - 1) else 0;
            const tile_y = @intCast(u8, y_diff) ^ if (sprite.vFlip()) (sprite.height - 1) else 0;

            const row = @truncate(u3, tile_y);
            const col = @truncate(u3, tile_x);
@@ -487,16 +475,17 @@ pub const Ppu = struct {
            aff_x += self.aff_bg[n - 2].pa;
            aff_y += self.aff_bg[n - 2].pc;

            if (!shouldDrawBackground(n, self.bld.cnt, &self.scanline, i)) continue;
            const x = @bitCast(u32, ix);
            const y = @bitCast(u32, iy);

            const win_bounds = self.windowBounds(@truncate(u9, x), @truncate(u8, y));
            if (!shouldDrawBackground(self, n, win_bounds, i)) continue;

            if (self.bg[n].cnt.display_overflow.read()) {
                ix = if (ix > px_width) @rem(ix, px_width) else if (ix < 0) px_width + @rem(ix, px_width) else ix;
                iy = if (iy > px_height) @rem(iy, px_height) else if (iy < 0) px_height + @rem(iy, px_height) else iy;
            } else if (ix > px_width or iy > px_height or ix < 0 or iy < 0) continue;

            const x = @bitCast(u32, ix);
            const y = @bitCast(u32, iy);

            const tile_id: u32 = self.vram.read(u8, screen_base + ((y / 8) * @bitCast(u32, tile_width) + (x / 8)));
            const row = y & 7;
            const col = x & 7;
@@ -506,7 +495,7 @@ pub const Ppu = struct {

            if (pal_id != 0) {
                const bgr555 = self.palette.read(u16, pal_id * 2);
                copyToBackgroundBuffer(n, self.bld.cnt, &self.scanline, i, bgr555);
                self.copyToBackgroundBuffer(n, win_bounds, i, bgr555);
            }
        }

@@ -515,7 +504,7 @@ pub const Ppu = struct {
        self.aff_bg[n - 2].y_latch.? += self.aff_bg[n - 2].pd; // PD is added to BGxY
    }

    fn drawBackround(self: *Self, comptime n: u2) void {
    fn drawBackground(self: *Self, comptime n: u2) void {
        // A Tile in a charblock is a byte, while a Screen Entry is a halfword

        const char_base = 0x4000 * @as(u32, self.bg[n].cnt.char_base.read());
@@ -535,10 +524,11 @@ pub const Ppu = struct {

        var i: u32 = 0;
        while (i < width) : (i += 1) {
            if (!shouldDrawBackground(n, self.bld.cnt, &self.scanline, i)) continue;

            const x = hofs + i;

            const win_bounds = self.windowBounds(@truncate(u9, x), @truncate(u8, y));
            if (!shouldDrawBackground(self, n, win_bounds, i)) continue;

            // Grab the Screen Entry from VRAM
            const entry_addr = screen_base + tilemapOffset(size, x, y);
            const entry = @bitCast(ScreenEntry, self.vram.read(u16, entry_addr));
@@ -563,7 +553,7 @@ pub const Ppu = struct {

            if (pal_id != 0) {
                const bgr555 = self.palette.read(u16, pal_id * 2);
                copyToBackgroundBuffer(n, self.bld.cnt, &self.scanline, i, bgr555);
                self.copyToBackgroundBuffer(n, win_bounds, i, bgr555);
            }
        }
    }
@@ -589,10 +579,10 @@ pub const Ppu = struct {
                var layer: usize = 0;
                while (layer < 4) : (layer += 1) {
                    self.drawSprites(@truncate(u2, layer));
                    if (layer == self.bg[0].cnt.priority.read() and bg_enable & 1 == 1) self.drawBackround(0);
                    if (layer == self.bg[1].cnt.priority.read() and bg_enable >> 1 & 1 == 1) self.drawBackround(1);
                    if (layer == self.bg[2].cnt.priority.read() and bg_enable >> 2 & 1 == 1) self.drawBackround(2);
                    if (layer == self.bg[3].cnt.priority.read() and bg_enable >> 3 & 1 == 1) self.drawBackround(3);
                    if (layer == self.bg[0].cnt.priority.read() and bg_enable & 1 == 1) self.drawBackground(0);
                    if (layer == self.bg[1].cnt.priority.read() and bg_enable >> 1 & 1 == 1) self.drawBackground(1);
                    if (layer == self.bg[2].cnt.priority.read() and bg_enable >> 2 & 1 == 1) self.drawBackground(2);
                    if (layer == self.bg[3].cnt.priority.read() and bg_enable >> 3 & 1 == 1) self.drawBackground(3);
                }

                // Copy Drawn Scanline to Frame Buffer
@@ -617,8 +607,8 @@ pub const Ppu = struct {
                var layer: usize = 0;
                while (layer < 4) : (layer += 1) {
                    self.drawSprites(@truncate(u2, layer));
                    if (layer == self.bg[0].cnt.priority.read() and bg_enable & 1 == 1) self.drawBackround(0);
                    if (layer == self.bg[1].cnt.priority.read() and bg_enable >> 1 & 1 == 1) self.drawBackround(1);
                    if (layer == self.bg[0].cnt.priority.read() and bg_enable & 1 == 1) self.drawBackground(0);
                    if (layer == self.bg[1].cnt.priority.read() and bg_enable >> 1 & 1 == 1) self.drawBackground(1);
                    if (layer == self.bg[2].cnt.priority.read() and bg_enable >> 2 & 1 == 1) self.drawAffineBackground(2);
                }

@@ -696,7 +686,7 @@ pub const Ppu = struct {
                while (i < width) : (i += 1) {
                    // If we're outside of the bounds of mode 5, draw the background colour
                    const bgr555 =
                        if (scanline < m5_height and i < m5_width) self.vram.read(u16, vram_base + i * @sizeOf(u16)) else self.palette.getBackdrop();
                        if (scanline < m5_height and i < m5_width) self.vram.read(u16, vram_base + i * @sizeOf(u16)) else self.palette.backdrop();

                    std.mem.writeIntNative(u32, self.framebuf.get(.Emulator)[fb_base + i * @sizeOf(u32) ..][0..@sizeOf(u32)], rgba888(bgr555));
                }
@@ -740,7 +730,94 @@ pub const Ppu = struct {
        }

        if (maybe_top) |top| return top;
        return self.palette.getBackdrop();
        return self.palette.backdrop();
    }

    fn copyToBackgroundBuffer(self: *Self, comptime n: u2, bounds: ?WindowBounds, i: usize, bgr555: u16) void {
        if (self.bld.cnt.mode.read() != 0b00) {
            // Standard Alpha Blending
            const a_layers = self.bld.cnt.layer_a.read();
            const is_blend_enabled = (a_layers >> n) & 1 == 1;

            // If Alpha Blending is enabled and we've found an eligible layer for
            // Pixel A, store the pixel in the bottom pixel buffer

            const win_part = if (bounds) |win| blk: {
                // Window Enabled
                break :blk switch (win) {
                    .win0 => self.win.in.w0_bld.read(),
                    .win1 => self.win.in.w1_bld.read(),
                    .out => self.win.out.out_bld.read(),
                };
            } else true;

            if (win_part and is_blend_enabled) {
                self.scanline.btm()[i] = bgr555;
                return;
            }
        }

        self.scanline.top()[i] = bgr555;
    }

    const WindowBounds = enum { win0, win1, out };

    fn windowBounds(self: *Self, x: u9, y: u8) ?WindowBounds {
        const win0 = self.dispcnt.win_enable.read() & 1 == 1;
        const win1 = (self.dispcnt.win_enable.read() >> 1) & 1 == 1;
        const winObj = self.dispcnt.obj_win_enable.read();

        if (!(win0 or win1 or winObj)) return null;

        if (win0 and self.win.inRange(0, x, y)) return .win0;
        if (win1 and self.win.inRange(1, x, y)) return .win1;

        return .out;
    }

    fn shouldDrawBackground(self: *Self, comptime n: u2, bounds: ?WindowBounds, i: usize) bool {
        // If a pixel has been drawn on the top layer, it's because:
        // 1. The pixel is to be blended with a pixel on the bottom layer
        // 2. The pixel is not to be blended at all
        // Also, if we find a pixel on the top layer we don't need to bother with this I think?
        if (self.scanline.top()[i] != null) return false;

        if (bounds) |win| {
            switch (win) {
                .win0 => if ((self.win.in.w0_bg.read() >> n) & 1 == 0) return false,
                .win1 => if ((self.win.in.w1_bg.read() >> n) & 1 == 0) return false,
                .out => if ((self.win.out.out_bg.read() >> n) & 1 == 0) return false,
            }
        }

        if (self.scanline.btm()[i] != null) {
            // The pixel found in the bottom layer is:
            // 1. From a higher priority background
            // 2. From a background that is marked for blending (Pixel A)

            // If Alpha Blending isn't enabled, then we've already found a higher prio
            // pixel, we can return early
            if (self.bld.cnt.mode.read() != 0b01) return false;

            const b_layers = self.bld.cnt.layer_b.read();

            const win_part = if (bounds) |win| blk: {
                // Window Enabled
                break :blk switch (win) {
                    .win0 => self.win.in.w0_bld.read(),
                    .win1 => self.win.in.w1_bld.read(),
                    .out => self.win.out.out_bld.read(),
                };
            } else true;

            // If the Background is not marked for blending, we've already found
            // a higher priority pixel, move on.

            const is_blend_enabled = win_part and ((b_layers >> n) & 1 == 1);
            if (!is_blend_enabled) return false;
        }

        return true;
    }

    // TODO: Comment this + get a better understanding
@@ -782,7 +859,7 @@ pub const Ppu = struct {
        // See if HBlank DMA is present and not enabled

        if (!self.dispstat.vblank.read())
            pollDmaOnBlank(cpu.bus, .HBlank);
            dma.onBlanking(cpu.bus, .HBlank);

        self.dispstat.hblank.set();
        self.sched.push(.HBlank, 68 * 4 -| late);
@@ -824,7 +901,7 @@ pub const Ppu = struct {
                self.aff_bg[1].latchRefPoints();

                // See if Vblank DMA is present and not enabled
                pollDmaOnBlank(cpu.bus, .VBlank);
                dma.onBlanking(cpu.bus, .VBlank);
            }

            if (scanline == 227) self.dispstat.vblank.unset();
@@ -833,158 +910,6 @@ pub const Ppu = struct {
    }
};

const Palette = struct {
    const palram_size = 0x400;
    const Self = @This();

    buf: []u8,
    allocator: Allocator,

    fn init(allocator: Allocator) !Self {
        const buf = try allocator.alloc(u8, palram_size);
        std.mem.set(u8, buf, 0);

        return Self{
            .buf = buf,
            .allocator = allocator,
        };
    }

    fn deinit(self: *Self) void {
        self.allocator.free(self.buf);
        self.* = undefined;
    }

    pub fn read(self: *const Self, comptime T: type, address: usize) T {
        const addr = address & 0x3FF;

        return switch (T) {
            u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
            else => @compileError("PALRAM: Unsupported read width"),
        };
    }

    pub fn write(self: *Self, comptime T: type, address: usize, value: T) void {
        const addr = address & 0x3FF;

        switch (T) {
            u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)], value),
            u8 => {
                const align_addr = addr & ~@as(u32, 1); // Aligned to Halfword boundary
                std.mem.writeIntSliceLittle(u16, self.buf[align_addr..][0..@sizeOf(u16)], @as(u16, value) * 0x101);
            },
            else => @compileError("PALRAM: Unsupported write width"),
        }
    }

    fn getBackdrop(self: *const Self) u16 {
        return self.read(u16, 0);
    }
};

pub const Vram = struct {
    const vram_size = 0x18000;
    const Self = @This();

    buf: []u8,
    allocator: Allocator,

    fn init(allocator: Allocator) !Self {
        const buf = try allocator.alloc(u8, vram_size);
        std.mem.set(u8, buf, 0);

        return Self{
            .buf = buf,
            .allocator = allocator,
        };
    }

    fn deinit(self: *Self) void {
        self.allocator.free(self.buf);
        self.* = undefined;
    }

    pub fn read(self: *const Self, comptime T: type, address: usize) T {
        const addr = Self.mirror(address);

        return switch (T) {
            u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
            else => @compileError("VRAM: Unsupported read width"),
        };
    }

    pub fn write(self: *Self, comptime T: type, dispcnt: io.DisplayControl, address: usize, value: T) void {
        const mode: u3 = dispcnt.bg_mode.read();
        const idx = Self.mirror(address);

        switch (T) {
            u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[idx..][0..@sizeOf(T)], value),
            u8 => {
                // Ignore write if it falls within the boundaries of OBJ VRAM
                switch (mode) {
                    0, 1, 2 => if (0x0001_0000 <= idx) return,
                    else => if (0x0001_4000 <= idx) return,
                }

                const align_idx = idx & ~@as(u32, 1); // Aligned to a halfword boundary
                std.mem.writeIntSliceLittle(u16, self.buf[align_idx..][0..@sizeOf(u16)], @as(u16, value) * 0x101);
            },
            else => @compileError("VRAM: Unsupported write width"),
        }
    }

    pub fn mirror(address: usize) usize {
        // Mirrored in steps of 128K (64K + 32K + 32K) (abcc)
        const addr = address & 0x1FFFF;

        // If the address is within 96K we don't do anything,
        // otherwise we want to mirror the last 32K (addresses between 64K and 96K)
        return if (addr < vram_size) addr else 0x10000 + (addr & 0x7FFF);
    }
};

const Oam = struct {
    const oam_size = 0x400;
    const Self = @This();

    buf: []u8,
    allocator: Allocator,

    fn init(allocator: Allocator) !Self {
        const buf = try allocator.alloc(u8, oam_size);
        std.mem.set(u8, buf, 0);

        return Self{
            .buf = buf,
            .allocator = allocator,
        };
    }

    fn deinit(self: *Self) void {
        self.allocator.free(self.buf);
        self.* = undefined;
    }

    pub fn read(self: *const Self, comptime T: type, address: usize) T {
        const addr = address & 0x3FF;

        return switch (T) {
            u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
            else => @compileError("OAM: Unsupported read width"),
        };
    }

    pub fn write(self: *Self, comptime T: type, address: usize, value: T) void {
        const addr = address & 0x3FF;

        switch (T) {
            u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)], value),
            u8 => return, // 8-bit writes are explicitly ignored
            else => @compileError("OAM: Unsupported write width"),
        }
    }
};

const Blend = struct {
    const Self = @This();
@@ -1036,6 +961,36 @@ const Window = struct {
        return self.out.raw & 0x3F3F;
    }

    fn inRange(self: *const Self, comptime id: u1, x: u9, y: u8) bool {
        const winh = self.h[id];
        const winv = self.v[id];

        if (isYInRange(winv, y)) {
            const x1 = winh.x1.read();
            const x2 = winh.x2.read();

            // Within X Bounds
            return if (x1 < x2) blk: {
                break :blk x >= x1 and x < x2;
            } else blk: {
                break :blk x >= x1 or x < x2;
            };
        }

        return false;
    }

    inline fn isYInRange(winv: io.WinV, y: u9) bool {
        const y1 = winv.y1.read();
        const y2 = winv.y2.read();

        if (y1 < y2) {
            return y >= y1 and y < y2;
        } else {
            return y >= y1 or y < y2;
        }
    }

    pub fn setH(self: *Self, value: u32) void {
        self.h[0].raw = @truncate(u16, value);
        self.h[1].raw = @truncate(u16, value >> 16);
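`inRange` treats a window whose right edge (x2) is not greater than its left edge (x1) as wrapping around the screen edge, and the Y test works the same way. A standalone sketch of the X rule above with assumed coordinates:

```zig
const std = @import("std");

// The X-bounds rule from Window.inRange, reproduced standalone: when x1 < x2 the
// window is the span [x1, x2); otherwise it wraps around the edge of the screen.
fn inXBounds(x1: u9, x2: u9, x: u9) bool {
    return if (x1 < x2) (x >= x1 and x < x2) else (x >= x1 or x < x2);
}

test "window X wraparound" {
    try std.testing.expect(inXBounds(16, 64, 32)); // ordinary window
    try std.testing.expect(inXBounds(200, 40, 230)); // wrapped window: [200, 240) and [0, 40)
    try std.testing.expect(!inXBounds(200, 40, 100));
}
```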
@@ -1343,37 +1298,6 @@ fn alphaBlend(top: u16, btm: u16, bldalpha: io.BldAlpha) u16 {
    return (bld_b << 10) | (bld_g << 5) | bld_r;
}

fn shouldDrawBackground(comptime n: u2, bldcnt: io.BldCnt, scanline: *Scanline, i: usize) bool {
    // If a pixel has been drawn on the top layer, it's because
    // Either the pixel is to be blended with a pixel on the bottom layer
    // or the pixel is not to be blended at all
    // Consequentially, if we find a pixel on the top layer, there's no need
    // to render anything I think?
    if (scanline.top()[i] != null) return false;

    if (scanline.btm()[i] != null) {
        // The Pixel found in the Bottom layer is
        // 1. From a higher priority
        // 2. From a Backround that is marked for Blending (Pixel A)
        //
        // We now have to confirm whether this current Background can be used
        // as Pixel B or not.

        // If Alpha Blending isn't enabled, we've aready found a higher
        // priority pixel to render. Move on
        if (bldcnt.mode.read() != 0b01) return false;

        const b_layers = bldcnt.layer_b.read();
        const is_blend_enabled = (b_layers >> n) & 1 == 1;

        // If the Background is not marked for blending, we've already found
        // a higher priority pixel, move on.
        if (!is_blend_enabled) return false;
    }

    return true;
}

fn shouldDrawSprite(bldcnt: io.BldCnt, scanline: *Scanline, x: u9) bool {
    if (scanline.top()[x] != null) return false;

@@ -1388,23 +1312,6 @@ fn shouldDrawSprite(bldcnt: io.BldCnt, scanline: *Scanline, x: u9) bool {
    return true;
}

fn copyToBackgroundBuffer(comptime n: u2, bldcnt: io.BldCnt, scanline: *Scanline, i: usize, bgr555: u16) void {
    if (bldcnt.mode.read() != 0b00) {
        // Standard Alpha Blending
        const a_layers = bldcnt.layer_a.read();
        const is_blend_enabled = (a_layers >> n) & 1 == 1;

        // If Alpha Blending is enabled and we've found an eligible layer for
        // Pixel A, store the pixel in the bottom pixel buffer
        if (is_blend_enabled) {
            scanline.btm()[i] = bgr555;
            return;
        }
    }

    scanline.top()[i] = bgr555;
}

fn copyToSpriteBuffer(bldcnt: io.BldCnt, scanline: *Scanline, x: u9, bgr555: u16) void {
    if (bldcnt.mode.read() != 0b00) {
        // Alpha Blending
@@ -1457,48 +1364,3 @@ const Scanline = struct {
        return self.layers[1];
    }
};

// Double Buffering Implementation
const FrameBuffer = struct {
    const Self = @This();

    layers: [2][]u8,
    buf: []u8,
    current: u1,

    allocator: Allocator,

    // TODO: Rename
    const Device = enum {
        Emulator,
        Renderer,
    };

    pub fn init(allocator: Allocator) !Self {
        const framebuf_len = framebuf_pitch * height;
        const buf = try allocator.alloc(u8, framebuf_len * 2);
        std.mem.set(u8, buf, 0);

        return .{
            // Front and Back Framebuffers
            .layers = [_][]u8{ buf[0..][0..framebuf_len], buf[framebuf_len..][0..framebuf_len] },
            .buf = buf,
            .current = 0,

            .allocator = allocator,
        };
    }

    fn deinit(self: *Self) void {
        self.allocator.free(self.buf);
        self.* = undefined;
    }

    pub fn swap(self: *Self) void {
        self.current = ~self.current;
    }

    pub fn get(self: *Self, comptime dev: Device) []u8 {
        return self.layers[if (dev == .Emulator) self.current else ~self.current];
    }
};
							
								
								
									
src/core/ppu/Oam.zig (new file, 40 lines)

@@ -0,0 +1,40 @@
const std = @import("std");

const Allocator = std.mem.Allocator;

const buf_len = 0x400;
const Self = @This();

buf: []u8,
allocator: Allocator,

pub fn read(self: *const Self, comptime T: type, address: usize) T {
    const addr = address & 0x3FF;

    return switch (T) {
        u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
        else => @compileError("OAM: Unsupported read width"),
    };
}

pub fn write(self: *Self, comptime T: type, address: usize, value: T) void {
    const addr = address & 0x3FF;

    switch (T) {
        u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)], value),
        u8 => return, // 8-bit writes are explicitly ignored
        else => @compileError("OAM: Unsupported write width"),
    }
}

pub fn init(allocator: Allocator) !Self {
    const buf = try allocator.alloc(u8, buf_len);
    std.mem.set(u8, buf, 0);

    return Self{ .buf = buf, .allocator = allocator };
}

pub fn deinit(self: *Self) void {
    self.allocator.free(self.buf);
    self.* = undefined;
}
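As the comment in the new module notes, the GBA drops byte writes to OAM entirely, so the `u8` branch above is a deliberate no-op rather than an omission. A small test sketch against the module above (the import path is assumed relative to src/core):

```zig
const std = @import("std");
const Oam = @import("ppu/Oam.zig");

test "byte writes to OAM are ignored" {
    var oam = try Oam.init(std.testing.allocator);
    defer oam.deinit();

    oam.write(u16, 0x0, 0x1234);
    oam.write(u8, 0x0, 0xFF); // silently dropped by the u8 branch

    try std.testing.expectEqual(@as(u16, 0x1234), oam.read(u16, 0x0));
}
```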
							
								
								
									
src/core/ppu/Palette.zig (new file, 47 lines)

@@ -0,0 +1,47 @@
const std = @import("std");

const Allocator = std.mem.Allocator;

const buf_len = 0x400;
const Self = @This();

buf: []u8,
allocator: Allocator,

pub fn read(self: *const Self, comptime T: type, address: usize) T {
    const addr = address & 0x3FF;

    return switch (T) {
        u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
        else => @compileError("PALRAM: Unsupported read width"),
    };
}

pub fn write(self: *Self, comptime T: type, address: usize, value: T) void {
    const addr = address & 0x3FF;

    switch (T) {
        u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)], value),
        u8 => {
            const align_addr = addr & ~@as(u32, 1); // Aligned to Halfword boundary
            std.mem.writeIntSliceLittle(u16, self.buf[align_addr..][0..@sizeOf(u16)], @as(u16, value) * 0x101);
        },
        else => @compileError("PALRAM: Unsupported write width"),
    }
}

pub fn init(allocator: Allocator) !Self {
    const buf = try allocator.alloc(u8, buf_len);
    std.mem.set(u8, buf, 0);

    return Self{ .buf = buf, .allocator = allocator };
}

pub fn deinit(self: *Self) void {
    self.allocator.free(self.buf);
    self.* = undefined;
}

pub fn backdrop(self: *const Self) u16 {
    return self.read(u16, 0);
}
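PALRAM ignores the byte lane on 8-bit writes and instead stores the byte in both halves of the aligned halfword; the `* 0x101` above performs that duplication. A minimal standalone check:

```zig
const std = @import("std");

test "an 8-bit PALRAM write duplicates the byte across the halfword" {
    const value: u8 = 0xAB;
    const halfword = @as(u16, value) * 0x101; // 0xAB * 0x0101 == 0xABAB

    try std.testing.expectEqual(@as(u16, 0xABAB), halfword);
}
```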
							
								
								
									
src/core/ppu/Vram.zig (new file, 60 lines)

@@ -0,0 +1,60 @@
const std = @import("std");
const io = @import("../bus/io.zig");

const Allocator = std.mem.Allocator;

const buf_len = 0x18000;
const Self = @This();

buf: []u8,
allocator: Allocator,

pub fn read(self: *const Self, comptime T: type, address: usize) T {
    const addr = Self.mirror(address);

    return switch (T) {
        u32, u16, u8 => std.mem.readIntSliceLittle(T, self.buf[addr..][0..@sizeOf(T)]),
        else => @compileError("VRAM: Unsupported read width"),
    };
}

pub fn write(self: *Self, comptime T: type, dispcnt: io.DisplayControl, address: usize, value: T) void {
    const mode: u3 = dispcnt.bg_mode.read();
    const idx = Self.mirror(address);

    switch (T) {
        u32, u16 => std.mem.writeIntSliceLittle(T, self.buf[idx..][0..@sizeOf(T)], value),
        u8 => {
            // Ignore write if it falls within the boundaries of OBJ VRAM
            switch (mode) {
                0, 1, 2 => if (0x0001_0000 <= idx) return,
                else => if (0x0001_4000 <= idx) return,
            }

            const align_idx = idx & ~@as(u32, 1); // Aligned to a halfword boundary
            std.mem.writeIntSliceLittle(u16, self.buf[align_idx..][0..@sizeOf(u16)], @as(u16, value) * 0x101);
        },
        else => @compileError("VRAM: Unsupported write width"),
    }
}

pub fn init(allocator: Allocator) !Self {
    const buf = try allocator.alloc(u8, buf_len);
    std.mem.set(u8, buf, 0);

    return Self{ .buf = buf, .allocator = allocator };
}

pub fn deinit(self: *Self) void {
    self.allocator.free(self.buf);
    self.* = undefined;
}

pub fn mirror(address: usize) usize {
    // Mirrored in steps of 128K (64K + 32K + 32K) (abcc)
    const addr = address & 0x1FFFF;

    // If the address is within 96K we don't do anything,
    // otherwise we want to mirror the last 32K (addresses between 64K and 96K)
    return if (addr < buf_len) addr else 0x10000 + (addr & 0x7FFF);
}
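`mirror` folds the 128 KiB mirror window (64K background + 32K object + a repeat of that 32K, "abcc") back onto the 96 KiB buffer. A standalone sketch with assumed addresses:

```zig
const std = @import("std");

// The mirroring rule from Vram.zig, reproduced standalone for illustration.
fn mirror(address: usize) usize {
    const addr = address & 0x1FFFF; // VRAM repeats every 128 KiB
    return if (addr < 0x18000) addr else 0x10000 + (addr & 0x7FFF);
}

test "the top 32 KiB of the 128 KiB window mirrors OBJ VRAM" {
    try std.testing.expectEqual(@as(usize, 0x04000), mirror(0x04000)); // within 96 KiB: unchanged
    try std.testing.expectEqual(@as(usize, 0x12123), mirror(0x1A123)); // 0x18000..0x1FFFF maps onto 0x10000..0x17FFF
}
```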
							
								
								
									
src/util.zig (46 changed lines)

@@ -5,6 +5,8 @@ const config = @import("config.zig");
const Log2Int = std.math.Log2Int;
const Arm7tdmi = @import("core/cpu.zig").Arm7tdmi;

const Allocator = std.mem.Allocator;

// Sign-Extend value of type `T` to type `U`
pub fn sext(comptime T: type, comptime U: type, value: T) T {
    // U must have less bits than T
@@ -174,6 +176,7 @@ pub const io = struct {

pub const Logger = struct {
    const Self = @This();
    const FmtArgTuple = std.meta.Tuple(&.{ u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32 });

    buf: std.io.BufferedWriter(4096 << 2, std.fs.File.Writer),

@@ -232,8 +235,6 @@ pub const Logger = struct {
    }
};

const FmtArgTuple = std.meta.Tuple(&.{ u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32 });

pub const audio = struct {
    const _io = @import("core/bus/io.zig");

@@ -326,3 +327,44 @@ fn HalfInt(comptime T: type) type {

    return std.meta.Int(type_info.Int.signedness, type_info.Int.bits >> 1);
}

/// Double Buffering Implementation
pub const FrameBuffer = struct {
    const Self = @This();

    layers: [2][]u8,
    buf: []u8,
    current: u1,

    allocator: Allocator,

    // TODO: Rename
    const Device = enum { Emulator, Renderer };

    pub fn init(allocator: Allocator, comptime len: comptime_int) !Self {
        const buf = try allocator.alloc(u8, len * 2);
        std.mem.set(u8, buf, 0);

        return .{
            // Front and Back Framebuffers
            .layers = [_][]u8{ buf[0..][0..len], buf[len..][0..len] },
            .buf = buf,
            .current = 0,

            .allocator = allocator,
        };
    }

    pub fn deinit(self: *Self) void {
        self.allocator.free(self.buf);
        self.* = undefined;
    }

    pub fn swap(self: *Self) void {
        self.current = ~self.current;
    }

    pub fn get(self: *Self, comptime dev: Device) []u8 {
        return self.layers[if (dev == .Emulator) self.current else ~self.current];
    }
};
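`FrameBuffer` moved here so it can be shared outside the PPU, and it now takes its length at `init` instead of assuming the PPU's pitch. A hedged usage sketch (the 240 * 160 * 4 byte size and the import path are assumptions; the emulator writes one layer while the renderer reads the other):

```zig
const std = @import("std");
const FrameBuffer = @import("util.zig").FrameBuffer;

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    var fb = try FrameBuffer.init(gpa.allocator(), 240 * 160 * @sizeOf(u32));
    defer fb.deinit();

    // The emulator fills the back buffer...
    std.mem.set(u8, fb.get(.Emulator), 0xFF);

    // ...and swap() publishes it to whoever reads the Renderer side.
    fb.swap();
    std.debug.assert(fb.get(.Renderer)[0] == 0xFF);
}
```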