diff --git a/src/cpu/arm/data_processing.zig b/src/cpu/arm/data_processing.zig
index cc3ea64..834d041 100644
--- a/src/cpu/arm/data_processing.zig
+++ b/src/cpu/arm/data_processing.zig
@@ -145,7 +145,7 @@ fn armSbc(comptime S: bool, cpu: *Arm7tdmi, rd: u4, left: u32, right: u32, old_c
 
 pub fn sbc(comptime S: bool, cpu: *Arm7tdmi, left: u32, right: u32, old_carry: u1) u32 {
     // TODO: Make your own version (thanks peach.bot)
-    const subtrahend = @as(u64, right) - old_carry + 1;
+    const subtrahend = @as(u64, right) -% old_carry +% 1;
     const result = @truncate(u32, left -% subtrahend);
 
     if (S) {
diff --git a/src/cpu/arm/multiply_long.zig b/src/cpu/arm/multiply_long.zig
index 89c0652..c6de693 100644
--- a/src/cpu/arm/multiply_long.zig
+++ b/src/cpu/arm/multiply_long.zig
@@ -15,14 +15,14 @@ pub fn multiplyLong(comptime U: bool, comptime A: bool, comptime S: bool) InstrF
         if (U) {
            // Signed (WHY IS IT U THEN?)
            var result: i64 = @as(i64, @bitCast(i32, cpu.r[rm])) * @as(i64, @bitCast(i32, cpu.r[rs]));
-           if (A) result += @bitCast(i64, @as(u64, cpu.r[rd_hi]) << 32 | @as(u64, cpu.r[rd_lo]));
+           if (A) result +%= @bitCast(i64, @as(u64, cpu.r[rd_hi]) << 32 | @as(u64, cpu.r[rd_lo]));
 
            cpu.r[rd_hi] = @bitCast(u32, @truncate(i32, result >> 32));
            cpu.r[rd_lo] = @bitCast(u32, @truncate(i32, result));
        } else {
            // Unsigned
            var result: u64 = @as(u64, cpu.r[rm]) * @as(u64, cpu.r[rs]);
-           if (A) result += @as(u64, cpu.r[rd_hi]) << 32 | @as(u64, cpu.r[rd_lo]);
+           if (A) result +%= @as(u64, cpu.r[rd_hi]) << 32 | @as(u64, cpu.r[rd_lo]);
 
            cpu.r[rd_hi] = @truncate(u32, result >> 32);
            cpu.r[rd_lo] = @truncate(u32, result);
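
For context on the change: in Zig, the plain `+`, `-`, and `+=` operators are overflow-checked and trap in Debug and ReleaseSafe builds, while `-%`, `+%`, and `+%=` wrap modulo 2^N, which is the modular arithmetic the ARM7TDMI's SBC and long multiply-accumulate actually perform. A minimal standalone sketch of the difference (the test name and sample values are illustrative, not taken from the emulator):

```zig
const std = @import("std");
const testing = std.testing;

test "wrapping ops give the modular results SBC and UMLAL expect" {
    // SBC's subtrahend is `right + 1 - carry`. With right == 0 and carry == 1,
    // the checked form `@as(u64, right) - old_carry + 1` underflows the u64 and
    // traps in safe build modes (the operands are runtime values in the
    // emulator); the wrapping form reduces modulo 2^64 and yields 0 as intended.
    const right: u32 = 0;
    const old_carry: u1 = 1;
    const subtrahend = @as(u64, right) -% old_carry +% 1;
    try testing.expectEqual(@as(u64, 0), subtrahend);

    // UMLAL-style accumulate: product + {rd_hi, rd_lo} can exceed 64 bits.
    // `+%=` keeps the low 64 bits instead of trapping on the carry-out.
    var acc: u64 = 0xFFFF_FFFF_FFFF_FFFF;
    acc +%= 1;
    try testing.expectEqual(@as(u64, 0), acc);
}
```

Wrapping is not papering over a bug here: SBC, UMLAL, and SMLAL are defined to produce the low 32/64 bits of the exact result, so discarding the carry-out is exactly what the hardware does.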