feat: update for Zig v0.14.0

parent d72a39006a
commit 57e1b7e42f

.gitignore (vendored) | 1
@@ -1,3 +1,2 @@
 zig-out/
-zig-cache/
+.zig-cache/
build.zig | 29
@@ -15,8 +15,12 @@ pub fn build(b: *std.Build) void {
     // set a preferred release mode, allowing the user to decide how to optimize.
     const optimize = b.standardOptimizeOption(.{});
 
-    const lib = b.addStaticLibrary(.{
-        .name = "bit-string",
+    // This creates a "module", which represents a collection of source files alongside
+    // some compilation options, such as optimization mode and linked system libraries.
+    // Every executable or library we compile will be based on one or more modules.
+    const lib_mod = b.createModule(.{
+        // `root_source_file` is the Zig "entry point" of the module. If a module
+        // only contains e.g. external object files, you can make this `null`.
+        // In this case the main source file is merely a path, however, in more
+        // complicated build scripts, this could be a generated file.
         .root_source_file = b.path("src/lib.zig"),
@@ -24,10 +28,13 @@ pub fn build(b: *std.Build) void {
         .optimize = optimize,
     });
 
-    const docs = b.addInstallDirectory(.{
-        .install_dir = .prefix,
-        .install_subdir = "docs",
-        .source_dir = lib.getEmittedDocs(),
+    // Now, we will create a static library based on the module we created above.
+    // This creates a `std.Build.Step.Compile`, which is the build step responsible
+    // for actually invoking the compiler.
+    const lib = b.addLibrary(.{
+        .linkage = .static,
+        .name = "bit_string",
+        .root_module = lib_mod,
     });
 
     // This declares intent for the library to be installed into the standard
@@ -35,12 +42,16 @@ pub fn build(b: *std.Build) void {
     // running `zig build`).
     b.installArtifact(lib);
 
+    const docs = b.addInstallDirectory(.{
+        .install_dir = .prefix,
+        .install_subdir = "docs",
+        .source_dir = lib.getEmittedDocs(),
+    });
+
     // Creates a step for unit testing. This only builds the test executable
     // but does not run it.
     const lib_unit_tests = b.addTest(.{
-        .root_source_file = b.path("src/test.zig"),
-        .target = target,
-        .optimize = optimize,
+        .root_module = lib_mod,
     });
 
     const run_lib_unit_tests = b.addRunArtifact(lib_unit_tests);
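For context (not part of the commit): the Zig 0.14 pattern above boils down to creating one `*std.Build.Module` up front and handing it to every compile step through `.root_module`, instead of repeating `root_source_file`/`target`/`optimize` per step. A minimal sketch along those lines, using this repository's names but with the docs and test wiring simplified:

```zig
const std = @import("std");

pub fn build(b: *std.Build) void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    // One module owns the root source file and the compile options.
    const lib_mod = b.createModule(.{
        .root_source_file = b.path("src/lib.zig"),
        .target = target,
        .optimize = optimize,
    });

    // Compile steps (library, tests, ...) reuse the module via `.root_module`.
    const lib = b.addLibrary(.{
        .linkage = .static,
        .name = "bit_string",
        .root_module = lib_mod,
    });
    b.installArtifact(lib);

    const lib_unit_tests = b.addTest(.{ .root_module = lib_mod });
    const run_lib_unit_tests = b.addRunArtifact(lib_unit_tests);
    b.step("test", "Run unit tests").dependOn(&run_lib_unit_tests.step);
}
```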
build.zig.zon

@@ -6,16 +6,29 @@
     //
     // It is redundant to include "zig" in this name because it is already
     // within the Zig package namespace.
-    .name = "bit-string",
+    .name = .bit_string,
 
     // This is a [Semantic Version](https://semver.org/).
     // In a future version of Zig it will be used for package deduplication.
     .version = "0.1.0",
 
-    // This field is optional.
-    // This is currently advisory only; Zig does not yet do anything
-    // with this value.
-    //.minimum_zig_version = "0.11.0",
+    // Together with name, this represents a globally unique package
+    // identifier. This field is generated by the Zig toolchain when the
+    // package is first created, and then *never changes*. This allows
+    // unambiguous detection of one package being an updated version of
+    // another.
+    //
+    // When forking a Zig project, this id should be regenerated (delete the
+    // field and run `zig build`) if the upstream project is still maintained.
+    // Otherwise, the fork is *hostile*, attempting to take control over the
+    // original project's identity. Thus it is recommended to leave the comment
+    // on the following line intact, so that it shows up in code reviews that
+    // modify the field.
+    .fingerprint = 0x14a39efa6e959493, // Changing this has security and trust implications.
+
+    // Tracks the earliest Zig version that the package considers to be a
+    // supported use case.
+    .minimum_zig_version = "0.14.0",
 
     // This field is optional.
     // Each dependency must either provide a `url` and `hash`, or a `path`.
@@ -27,7 +40,8 @@
         //.example = .{
         //    // When updating this field to a new URL, be sure to delete the corresponding
         //    // `hash`, otherwise you are communicating that you expect to find the old hash at
-        //    // the new URL.
+        //    // the new URL. If the contents of a URL change this will result in a hash mismatch
+        //    // which will prevent zig from using it.
         //    .url = "https://example.com/foo.tar.gz",
         //
         //    // This is computed from the file contents of the directory of files that is
@@ -45,7 +59,7 @@
         //    // build root. In this case the package's hash is irrelevant and therefore not
         //    // computed. This field and `url` are mutually exclusive.
         //    .path = "foo",
-
+        //
         //    // When this is set to `true`, a package is declared to be lazily
         //    // fetched. This makes the dependency only get fetched if it is
         //    // actually used.
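Taken together, the manifest changes reduce to roughly the following minimal 0.14-style `build.zig.zon`. The `.fingerprint` value is the one introduced by this commit; `.dependencies` and `.paths` below are illustrative placeholders rather than this package's actual entries:

```zig
.{
    // 0.14: the package name is an enum literal, no longer a string.
    .name = .bit_string,
    .version = "0.1.0",
    // Generated once by the toolchain; kept stable across releases of the package.
    .fingerprint = 0x14a39efa6e959493,
    .minimum_zig_version = "0.14.0",
    .dependencies = .{},
    .paths = .{
        "build.zig",
        "build.zig.zon",
        "src",
    },
}
```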
src/lib.zig | 32
@@ -52,7 +52,7 @@ pub fn match(comptime bit_string: []const u8, value: anytype) bool {
     comptime verify(ValT, bit_string);
 
     const masks: struct { ValT, ValT } = comptime blk: {
-        const bit_count = @typeInfo(ValT).Int.bits;
+        const bit_count = @typeInfo(ValT).int.bits;
 
         var set: ValT = 0;
         var clr: ValT = 0;
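Most of the churn in this file is the Zig 0.14 rename of the `std.builtin.Type` union fields to lower-case tags: `.Int` becomes `.int`, `.Struct` becomes `.@"struct"`, `.ComptimeInt` becomes `.comptime_int`. A standalone sketch of the new spellings (the `bitsOf` helper is illustrative, not part of this library):

```zig
const std = @import("std");

// Zig 0.14: type-info fields are lower-case, so `.int` replaces `.Int`.
fn bitsOf(comptime T: type) u16 {
    return @typeInfo(T).int.bits;
}

test "lower-case @typeInfo tags" {
    try std.testing.expectEqual(@as(u16, 8), bitsOf(u8));
    try std.testing.expectEqual(@as(u16, 64), bitsOf(u64));
    // Tag comparisons use the same lower-case names.
    try std.testing.expect(@typeInfo(u8) != .comptime_int);
    try std.testing.expect(@typeInfo(struct {}) == .@"struct");
}
```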
@@ -123,13 +123,13 @@ pub fn extract(comptime bit_string: []const u8, value: anytype) Bitfield(bit_str
 
     var ret: ReturnT = undefined;
 
-    inline for (@typeInfo(ReturnT).Struct.fields) |field| {
+    inline for (@typeInfo(ReturnT).@"struct".fields) |field| {
         @field(ret, field.name) = blk: {
             var masked_val: ValT = 0;
             var offset: usize = 0; // FIXME(URGENT): this whole block should be happening at comptime...
 
             for (bit_string, 0..) |char, i| {
-                const rev = @typeInfo(ValT).Int.bits - 1 - (i - offset);
+                const rev = @typeInfo(ValT).int.bits - 1 - (i - offset);
 
                 switch (char) {
                     '_' => offset += 1,
@@ -139,7 +139,7 @@ pub fn extract(comptime bit_string: []const u8, value: anytype) Bitfield(bit_str
                 }
             }
 
-            const PextT = if (@typeInfo(ValT).Int.bits > 32) u64 else u32;
+            const PextT = if (@typeInfo(ValT).int.bits > 32) u64 else u32;
             const use_hw = bmi2 and !@inComptime();
 
             break :blk @truncate(if (use_hw) pext.hw(PextT, value, masked_val) else pext.sw(PextT, value, masked_val));
@@ -197,19 +197,19 @@ test extract {
         const ret = extract("--------", @as(u8, 0b00000000));
         const T = @TypeOf(ret);
 
-        try std.testing.expectEqual(@as(usize, 0), @typeInfo(T).Struct.fields.len);
+        try std.testing.expectEqual(@as(usize, 0), @typeInfo(T).@"struct".fields.len);
     }
     {
         const ret = extract("00000000", @as(u8, 0b00000000));
         const T = @TypeOf(ret);
 
-        try std.testing.expectEqual(@as(usize, 0), @typeInfo(T).Struct.fields.len);
+        try std.testing.expectEqual(@as(usize, 0), @typeInfo(T).@"struct".fields.len);
     }
     {
         const ret = extract("0-0-0-0-", @as(u8, 0b01010101));
         const T = @TypeOf(ret);
 
-        try std.testing.expectEqual(@as(usize, 0), @typeInfo(T).Struct.fields.len);
+        try std.testing.expectEqual(@as(usize, 0), @typeInfo(T).@"struct".fields.len);
     }
 
     {
@@ -272,12 +272,12 @@ pub fn Bitfield(comptime bit_string: []const u8) type {
         }
 
         for (things, &tmp) |th, *field| {
-            const FieldInt = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = th.bits } });
+            const FieldInt = @Type(.{ .int = .{ .signedness = .unsigned, .bits = th.bits } });
 
             field.* = .{
                 .name = &.{th.char.?},
                 .type = FieldInt,
-                .default_value = null,
+                .default_value_ptr = null,
                 .is_comptime = false,
                 .alignment = @alignOf(FieldInt),
             };
@@ -286,7 +286,7 @@ pub fn Bitfield(comptime bit_string: []const u8) type {
         break :blk tmp;
     };
 
-    return @Type(.{ .Struct = .{
+    return @Type(.{ .@"struct" = .{
         .layout = .auto,
         .fields = &fields,
         .decls = &.{},
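The `@Type` reconstruction above combines two 0.14 renames: the lower-case `.@"struct"`/`.int` tags and `StructField.default_value` becoming `default_value_ptr`. A self-contained sketch that builds a throwaway two-field struct the same way (`PairOfInts` is hypothetical and unrelated to the library's `Bitfield` logic):

```zig
const std = @import("std");

// Builds `struct { a: uA, b: uB }` at comptime with the Zig 0.14 field names.
fn PairOfInts(comptime bits_a: u16, comptime bits_b: u16) type {
    const A = @Type(.{ .int = .{ .signedness = .unsigned, .bits = bits_a } });
    const B = @Type(.{ .int = .{ .signedness = .unsigned, .bits = bits_b } });

    const fields = [_]std.builtin.Type.StructField{
        // `.default_value_ptr` replaces the pre-0.14 `.default_value`.
        .{ .name = "a", .type = A, .default_value_ptr = null, .is_comptime = false, .alignment = @alignOf(A) },
        .{ .name = "b", .type = B, .default_value_ptr = null, .is_comptime = false, .alignment = @alignOf(B) },
    };

    return @Type(.{ .@"struct" = .{
        .layout = .auto,
        .fields = &fields,
        .decls = &.{},
        .is_tuple = false,
    } });
}

test PairOfInts {
    const T = PairOfInts(3, 5);
    try std.testing.expectEqual(@as(usize, 2), @typeInfo(T).@"struct".fields.len);
}
```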
@@ -297,16 +297,16 @@ pub fn Bitfield(comptime bit_string: []const u8) type {
 fn verify(comptime T: type, comptime bit_string: []const u8) void {
     const info = @typeInfo(T);
 
-    std.debug.assert(info != .ComptimeInt);
-    std.debug.assert(info.Int.signedness == .unsigned);
-    std.debug.assert(info.Int.bits <= 64); // x86 PEXT u32 and u64 operands only
+    std.debug.assert(info != .comptime_int);
+    std.debug.assert(info.int.signedness == .unsigned);
+    std.debug.assert(info.int.bits <= 64); // x86 PEXT u32 and u64 operands only
 
     var underscore_count = 0;
     for (bit_string) |c| {
         if (c == '_') underscore_count += 1;
     }
 
-    std.debug.assert((bit_string.len - underscore_count) == info.Int.bits);
+    std.debug.assert((bit_string.len - underscore_count) == info.int.bits);
 }
 
 const pext = struct {
@@ -333,7 +333,7 @@ const pext = struct {
             u32, u64 => {
                 // code source: https://stackoverflow.com/questions/41720249/detecting-matching-bits-in-c
                 // TODO: rewrite more in generic/idiomatic zig
-                const log2_bits = @typeInfo(Log2Int(T)).Int.bits;
+                const log2_bits = @typeInfo(Log2Int(T)).int.bits;
 
                 var val: T = value & mask; // immediately clear irrelevant bits
                 var msk: T = mask;
@@ -369,7 +369,7 @@ const pext = struct {
 
     switch (builtin.cpu.arch) {
         .x86_64 => if (std.Target.x86.featureSetHas(builtin.cpu.features, .bmi2)) {
-            var rand_impl = std.rand.DefaultPrng.init(0xBAADF00D_DEADCAFE);
+            var rand_impl = std.Random.DefaultPrng.init(0xBAADF00D_DEADCAFE);
 
             for (0..100) |_| {
                 const value = rand_impl.random().int(u32);
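The final hunk reflects the random-number namespace: as of 0.14 the PRNGs live under `std.Random` rather than `std.rand`, with the generator API itself unchanged. A minimal sketch:

```zig
const std = @import("std");

test "DefaultPrng under std.Random" {
    // Previously spelled std.rand.DefaultPrng.
    var prng = std.Random.DefaultPrng.init(0xBAADF00D_DEADCAFE);
    const random = prng.random();

    var sum: u64 = 0;
    for (0..100) |_| sum +%= random.int(u32);
    try std.testing.expect(sum != 0); // overwhelmingly likely for 100 draws
}
```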